author | Ed Schouten <ed@FreeBSD.org> | 2013-05-27 18:27:12 +0000 |
---|---|---|
committer | Ed Schouten <ed@FreeBSD.org> | 2013-05-27 18:27:12 +0000 |
commit | 11023dc647fd8f41418da90d59db138400d0f334 (patch) | |
tree | 50f0ab80515576749ef638dd0766b70a65904bfa | |
parent | 58aabf08b77d221489f10e274812ec60917c21a8 (diff) | |
434 files changed, 22298 insertions, 8908 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 04d6e9763bf8..a57751ce6f61 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,6 +15,19 @@ include(LLVMParseArguments) # runtime libraries. cmake_minimum_required(VERSION 2.8.8) +# Compute the Clang version from the LLVM version. +# FIXME: We should be able to reuse CLANG_VERSION variable calculated +# in Clang cmake files, instead of copying the rules here. +string(REGEX MATCH "[0-9]+\\.[0-9]+(\\.[0-9]+)?" CLANG_VERSION + ${PACKAGE_VERSION}) +# Setup the paths where compiler-rt runtimes and headers should be stored. +set(LIBCLANG_INSTALL_PATH lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}) +string(TOLOWER ${CMAKE_SYSTEM_NAME} LIBCLANG_OS_DIR) +set(CLANG_RESOURCE_DIR ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}) +set(COMPILER_RT_LIBRARY_OUTPUT_DIR ${CLANG_RESOURCE_DIR}/lib/${LIBCLANG_OS_DIR}) +set(COMPILER_RT_LIBRARY_INSTALL_DIR + ${LIBCLANG_INSTALL_PATH}/lib/${LIBCLANG_OS_DIR}) + # Add path for custom modules set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} @@ -23,6 +36,9 @@ set(CMAKE_MODULE_PATH include(AddCompilerRT) set(COMPILER_RT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +# Setup custom SDK sysroots. +set(COMPILER_RT_DARWIN_SDK_SYSROOT ${COMPILER_RT_SOURCE_DIR}/SDKs/darwin) +set(COMPILER_RT_LINUX_SDK_SYSROOT ${COMPILER_RT_SOURCE_DIR}/SDKs/linux) # Detect whether the current target platform is 32-bit or 64-bit, and setup # the correct commandline flags needed to attempt to target 32-bit and 64-bit. @@ -37,15 +53,8 @@ else() set(TARGET_32_BIT_CFLAGS "-m32") endif() -# FIXME: Below we assume that the target build of LLVM/Clang is x86, which is -# not at all valid. Much of this can be fixed just by switching to use -# a just-built-clang binary for the compiles. - -set(TARGET_x86_64_CFLAGS ${TARGET_64_BIT_CFLAGS}) -set(TARGET_i386_CFLAGS ${TARGET_32_BIT_CFLAGS}) - -set(COMPILER_RT_SUPPORTED_ARCH - x86_64 i386) +# List of architectures we can target. +set(COMPILER_RT_SUPPORTED_ARCH) function(get_target_flags_for_arch arch out_var) list(FIND COMPILER_RT_SUPPORTED_ARCH ${arch} ARCH_INDEX) @@ -60,27 +69,45 @@ endfunction() # platform. We use the results of these tests to build only the various target # runtime libraries supported by our current compilers cross-compiling # abilities. -set(SIMPLE_SOURCE64 ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/simple64.c) -file(WRITE ${SIMPLE_SOURCE64} "#include <stdlib.h>\nint main() {}") -try_compile(CAN_TARGET_x86_64 ${CMAKE_BINARY_DIR} ${SIMPLE_SOURCE64} - COMPILE_DEFINITIONS "${TARGET_x86_64_CFLAGS}" - CMAKE_FLAGS "-DCMAKE_EXE_LINKER_FLAGS:STRING=${TARGET_x86_64_CFLAGS}") - -set(SIMPLE_SOURCE32 ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/simple32.c) -file(WRITE ${SIMPLE_SOURCE32} "#include <stdlib.h>\nint main() {}") -try_compile(CAN_TARGET_i386 ${CMAKE_BINARY_DIR} ${SIMPLE_SOURCE32} - COMPILE_DEFINITIONS "${TARGET_i386_CFLAGS}" - CMAKE_FLAGS "-DCMAKE_EXE_LINKER_FLAGS:STRING=${TARGET_i386_CFLAGS}") +set(SIMPLE_SOURCE ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/simple.c) +file(WRITE ${SIMPLE_SOURCE} "#include <stdlib.h>\nint main() {}") + +# test_target_arch(<arch> <target flags...>) +# Sets the target flags for a given architecture and determines if this +# architecture is supported by trying to build a simple file. 
+macro(test_target_arch arch) + set(TARGET_${arch}_CFLAGS ${ARGN}) + try_compile(CAN_TARGET_${arch} ${CMAKE_BINARY_DIR} ${SIMPLE_SOURCE} + COMPILE_DEFINITIONS "${TARGET_${arch}_CFLAGS}" + CMAKE_FLAGS "-DCMAKE_EXE_LINKER_FLAGS:STRING=${TARGET_${arch}_CFLAGS}") + if(${CAN_TARGET_${arch}}) + list(APPEND COMPILER_RT_SUPPORTED_ARCH ${arch}) + endif() +endmacro() + +if("${LLVM_NATIVE_ARCH}" STREQUAL "X86") + test_target_arch(x86_64 ${TARGET_64_BIT_CFLAGS}) + test_target_arch(i386 ${TARGET_32_BIT_CFLAGS}) +elseif("${LLVM_NATIVE_ARCH}" STREQUAL "PowerPC") + # Explicitly set -m flag on powerpc, because on ppc64 defaults for gcc and + # clang are different. + test_target_arch(powerpc64 "-m64") + test_target_arch(powerpc "-m32") +endif() # We only support running instrumented tests when we're not cross compiling # and target a unix-like system. On Android we define the rules for building # unit tests, but don't execute them. if("${CMAKE_HOST_SYSTEM}" STREQUAL "${CMAKE_SYSTEM}" AND UNIX AND NOT ANDROID) - set(COMPILER_RT_CAN_EXECUTE_TESTS TRUE) + option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" ON) else() - set(COMPILER_RT_CAN_EXECUTE_TESTS FALSE) + option(COMPILER_RT_CAN_EXECUTE_TESTS "Can we execute instrumented tests" OFF) endif() - + +# Check if compiler-rt is built with libc++. +find_flag_in_string("${CMAKE_CXX_FLAGS}" "-stdlib=libc++" + COMPILER_RT_USES_LIBCXX) + function(filter_available_targets out_var) set(archs) foreach(arch ${ARGN}) @@ -99,6 +126,8 @@ set(SANITIZER_COMMON_CFLAGS -fno-exceptions -fomit-frame-pointer -funwind-tables + -fno-stack-protector + -Wno-gnu # Variadic macros with 0 arguments for ... -O3 ) if(NOT WIN32) @@ -120,51 +149,36 @@ check_cxx_compiler_flag(-Wno-c99-extensions SUPPORTS_NO_C99_EXTENSIONS_FLAG) if(SUPPORTS_NO_C99_EXTENSIONS_FLAG) list(APPEND SANITIZER_COMMON_CFLAGS -Wno-c99-extensions) endif() +# Sanitizer may not have libstdc++, so we can have problems with virtual +# destructors. +check_cxx_compiler_flag(-Wno-non-virtual-dtor SUPPORTS_NO_NON_VIRTUAL_DTOR_FLAG) +if (SUPPORTS_NO_NON_VIRTUAL_DTOR_FLAG) + list(APPEND SANITIZER_COMMON_CFLAGS -Wno-non-virtual-dtor) +endif() + +# Setup min Mac OS X version. if(APPLE) - list(APPEND SANITIZER_COMMON_CFLAGS -mmacosx-version-min=10.5) + if(COMPILER_RT_USES_LIBCXX) + set(SANITIZER_MIN_OSX_VERSION 10.7) + else() + set(SANITIZER_MIN_OSX_VERSION 10.5) + endif() + list(APPEND SANITIZER_COMMON_CFLAGS + -mmacosx-version-min=${SANITIZER_MIN_OSX_VERSION}) endif() # Architectures supported by Sanitizer runtimes. Specific sanitizers may # support only subset of these (e.g. TSan works on x86_64 only). filter_available_targets(SANITIZER_COMMON_SUPPORTED_ARCH - x86_64 i386) - -# Compute the Clang version from the LLVM version. -# FIXME: We should be able to reuse CLANG_VERSION variable calculated -# in Clang cmake files, instead of copying the rules here. -string(REGEX MATCH "[0-9]+\\.[0-9]+(\\.[0-9]+)?" CLANG_VERSION - ${PACKAGE_VERSION}) -# Setup the paths where compiler-rt runtimes and headers should be stored. -set(LIBCLANG_INSTALL_PATH lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}) -string(TOLOWER ${CMAKE_SYSTEM_NAME} LIBCLANG_OS_DIR) - -# Install compiler-rt headers. -install(DIRECTORY include/ - DESTINATION ${LIBCLANG_INSTALL_PATH}/include - FILES_MATCHING - PATTERN "*.h" - PATTERN ".svn" EXCLUDE - ) - -# Call add_clang_compiler_rt_libraries to make sure that targets are built -# and installed in the directories where Clang driver expects to find them. 
-macro(add_clang_compiler_rt_libraries) - # Setup output directories so that clang in build tree works. - set_target_properties(${ARGN} PROPERTIES - ARCHIVE_OUTPUT_DIRECTORY - ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/lib/${LIBCLANG_OS_DIR} - LIBRARY_OUTPUT_DIRECTORY - ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/lib/${LIBCLANG_OS_DIR} - ) - # Add installation command. - install(TARGETS ${ARGN} - ARCHIVE DESTINATION ${LIBCLANG_INSTALL_PATH}/lib/${LIBCLANG_OS_DIR} - LIBRARY DESTINATION ${LIBCLANG_INSTALL_PATH}/lib/${LIBCLANG_OS_DIR} - ) -endmacro(add_clang_compiler_rt_libraries) + x86_64 i386 powerpc64 powerpc) # Add the public header's directory to the includes for all of compiler-rt. include_directories(include) +add_subdirectory(include) + +set(SANITIZER_COMMON_LIT_TEST_DEPS + clang clang-headers FileCheck count not llvm-nm llvm-symbolizer + compiler-rt-headers) add_subdirectory(lib) @@ -255,10 +255,10 @@ $(Tmp.ObjPath)/%.o: $(Tmp.SrcPath)/%.S $(Tmp.Dependencies) $(Tmp.ObjPath)/.dir $(Verb) $(Tmp.CC) $(Tmp.CFLAGS) -c -o $$@ $$< $(Tmp.ObjPath)/%.o: $(Tmp.SrcPath)/%.c $(Tmp.Dependencies) $(Tmp.ObjPath)/.dir $(Summary) " COMPILE: $(Tmp.Name)/$(Tmp.Config)/$(Tmp.Arch): $$<" - $(Verb) $(Tmp.CC) $(Tmp.CFLAGS) -c $(COMMON_CFLAGS) -o $$@ $$< + $(Verb) $(Tmp.CC) $(COMMON_CFLAGS) $(Tmp.CFLAGS) -c -o $$@ $$< $(Tmp.ObjPath)/%.o: $(Tmp.SrcPath)/%.cc $(Tmp.Dependencies) $(Tmp.ObjPath)/.dir $(Summary) " COMPILE: $(Tmp.Name)/$(Tmp.Config)/$(Tmp.Arch): $$<" - $(Verb) $(Tmp.CC) $(Tmp.CFLAGS) -c $(COMMON_CXXFLAGS) -o $$@ $$< + $(Verb) $(Tmp.CC) $(COMMON_CXXFLAGS) $(Tmp.CFLAGS) -c -o $$@ $$< .PRECIOUS: $(Tmp.ObjPath)/.dir endef diff --git a/SDKs/darwin/usr/include/fcntl.h b/SDKs/darwin/usr/include/fcntl.h new file mode 100644 index 000000000000..a5f91e3a5bc6 --- /dev/null +++ b/SDKs/darwin/usr/include/fcntl.h @@ -0,0 +1,17 @@ +/* ===-- fcntl.h - stub SDK header for compiler-rt --------------------------=== + * + * The LLVM Compiler Infrastructure + * + * This file is dual licensed under the MIT and the University of Illinois Open + * Source Licenses. See LICENSE.TXT for details. + * + * ===-----------------------------------------------------------------------=== + * + * This is a stub SDK header file. This file is not part of the interface of + * this library nor an official version of the appropriate SDK header. It is + * intended only to stub the features of this header required by compiler-rt. + * + * ===-----------------------------------------------------------------------=== + */ + +#include <sys/fcntl.h> diff --git a/SDKs/darwin/usr/include/stdio.h b/SDKs/darwin/usr/include/stdio.h index 63b10a86b632..006652fb9b88 100644 --- a/SDKs/darwin/usr/include/stdio.h +++ b/SDKs/darwin/usr/include/stdio.h @@ -24,15 +24,18 @@ extern "C" { typedef struct __sFILE FILE; typedef __SIZE_TYPE__ size_t; -/* Determine the appropriate fopen() and fwrite() functions. */ +/* Determine the appropriate fdopen, fopen(), and fwrite() functions. 
*/ #if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) # if defined(__i386) +# define __FDOPEN_NAME "_fdopen$UNIX2003" # define __FOPEN_NAME "_fopen$UNIX2003" # define __FWRITE_NAME "_fwrite$UNIX2003" # elif defined(__x86_64__) +# define __FDOPEN_NAME "_fdopen" # define __FOPEN_NAME "_fopen" # define __FWRITE_NAME "_fwrite" # elif defined(__arm) +# define __FDOPEN_NAME "_fdopen" # define __FOPEN_NAME "_fopen" # define __FWRITE_NAME "_fwrite" # else @@ -40,9 +43,11 @@ typedef __SIZE_TYPE__ size_t; # endif #elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) # if defined(__i386) || defined (__x86_64) +# define __FDOPEN_NAME "_fdopen" # define __FOPEN_NAME "_fopen" # define __FWRITE_NAME "_fwrite" # elif defined(__arm) +# define __FDOPEN_NAME "_fdopen" # define __FOPEN_NAME "_fopen" # define __FWRITE_NAME "_fwrite" # else @@ -68,13 +73,13 @@ extern FILE *__stderrp; int fclose(FILE *); int fflush(FILE *); FILE *fopen(const char * __restrict, const char * __restrict) __asm(__FOPEN_NAME); +FILE *fdopen(int, const char *) __asm(__FDOPEN_NAME); int fprintf(FILE * __restrict, const char * __restrict, ...); size_t fwrite(const void * __restrict, size_t, size_t, FILE * __restrict) __asm(__FWRITE_NAME); size_t fread(void * __restrict, size_t, size_t, FILE * __restrict); long ftell(FILE *); int fseek(FILE *, long, int); - int snprintf(char * __restrict, size_t, const char * __restrict, ...); #if defined(__cplusplus) diff --git a/SDKs/darwin/usr/include/stdlib.h b/SDKs/darwin/usr/include/stdlib.h index c18c2e49a329..b6d3171cff49 100644 --- a/SDKs/darwin/usr/include/stdlib.h +++ b/SDKs/darwin/usr/include/stdlib.h @@ -22,9 +22,11 @@ typedef __SIZE_TYPE__ size_t; void abort(void) __attribute__((__noreturn__)); +int atexit(void (*)(void)); int atoi(const char *); void free(void *); char *getenv(const char *); void *malloc(size_t); +void *realloc(void *, size_t); #endif /* __STDLIB_H__ */ diff --git a/SDKs/darwin/usr/include/string.h b/SDKs/darwin/usr/include/string.h index bee9d46cddc4..c7da1f57ba57 100644 --- a/SDKs/darwin/usr/include/string.h +++ b/SDKs/darwin/usr/include/string.h @@ -21,6 +21,7 @@ typedef __SIZE_TYPE__ size_t; int memcmp(const void *, const void *, size_t); void *memcpy(void *, const void *, size_t); +void *memset(void *, int, size_t); char *strcat(char *, const char *); char *strcpy(char *, const char *); char *strdup(const char *); diff --git a/SDKs/darwin/usr/include/sys/fcntl.h b/SDKs/darwin/usr/include/sys/fcntl.h new file mode 100644 index 000000000000..b71706bf453b --- /dev/null +++ b/SDKs/darwin/usr/include/sys/fcntl.h @@ -0,0 +1,52 @@ +/* ===-- fcntl.h - stub SDK header for compiler-rt --------------------------=== + * + * The LLVM Compiler Infrastructure + * + * This file is dual licensed under the MIT and the University of Illinois Open + * Source Licenses. See LICENSE.TXT for details. + * + * ===-----------------------------------------------------------------------=== + * + * This is a stub SDK header file. This file is not part of the interface of + * this library nor an official version of the appropriate SDK header. It is + * intended only to stub the features of this header required by compiler-rt. + * + * ===-----------------------------------------------------------------------=== + */ + +#ifndef _SYS_FCNTL_H_ +#define _SYS_FCNTL_H_ + +/* Determine the appropriate open function. 
*/ +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) +# if defined(__i386) +# define __OPEN_NAME "_open$UNIX2003" +# elif defined(__x86_64__) +# define __OPEN_NAME "_open" +# elif defined(__arm) +# define __OPEN_NAME "_open" +# else +# error "unrecognized architecture for targetting OS X" +# endif +#elif defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) +# if defined(__i386) || defined (__x86_64) +# define __OPEN_NAME "_open" +# elif defined(__arm) +# define __OPEN_NAME "_open" +# else +# error "unrecognized architecture for targetting iOS" +# endif +#else +# error "unrecognized architecture for targetting Darwin" +#endif + +#define O_RDONLY 0x0000 /* open for reading only */ +#define O_WRONLY 0x0001 /* open for writing only */ +#define O_RDWR 0x0002 /* open for reading and writing */ +#define O_ACCMODE 0x0003 /* mask for above modes */ + +#define O_CREAT 0x0200 /* create if nonexistant */ + +int open(const char *, int, ...) __asm(__OPEN_NAME); + +#endif /* !_SYS_FCNTL_H_ */ diff --git a/SDKs/darwin/usr/include/sys/mman.h b/SDKs/darwin/usr/include/sys/mman.h new file mode 100644 index 000000000000..84561f1b6abd --- /dev/null +++ b/SDKs/darwin/usr/include/sys/mman.h @@ -0,0 +1,42 @@ +/* ===-- mman.h - stub SDK header for compiler-rt ---------------------------=== + * + * The LLVM Compiler Infrastructure + * + * This file is dual licensed under the MIT and the University of Illinois Open + * Source Licenses. See LICENSE.TXT for details. + * + * ===-----------------------------------------------------------------------=== + * + * This is a stub SDK header file. This file is not part of the interface of + * this library nor an official version of the appropriate SDK header. It is + * intended only to stub the features of this header required by compiler-rt. + * + * ===-----------------------------------------------------------------------=== + */ + +#ifndef __SYS_MMAN_H__ +#define __SYS_MMAN_H__ + +typedef __SIZE_TYPE__ size_t; + +#define PROT_NONE 0x00 +#define PROT_READ 0x01 +#define PROT_WRITE 0x02 +#define PROT_EXEC 0x04 + +#define MAP_SHARED 0x0001 +#define MAP_PRIVATE 0x0002 + +#define MAP_FILE 0x0000 +#define MAP_ANON 0x1000 + +#define MS_ASYNC 0x0001 +#define MS_INVALIDATE 0x0002 +#define MS_SYNC 0x0010 + +void *mmap(void *addr, size_t len, int prot, int flags, int fd, + long long offset); +int munmap(void *addr, size_t len); +int msync(void *addr, size_t len, int flags); + +#endif /* __SYS_MMAN_H__ */ diff --git a/SDKs/linux/usr/include/fcntl.h b/SDKs/linux/usr/include/fcntl.h new file mode 100644 index 000000000000..a5f91e3a5bc6 --- /dev/null +++ b/SDKs/linux/usr/include/fcntl.h @@ -0,0 +1,17 @@ +/* ===-- fcntl.h - stub SDK header for compiler-rt --------------------------=== + * + * The LLVM Compiler Infrastructure + * + * This file is dual licensed under the MIT and the University of Illinois Open + * Source Licenses. See LICENSE.TXT for details. + * + * ===-----------------------------------------------------------------------=== + * + * This is a stub SDK header file. This file is not part of the interface of + * this library nor an official version of the appropriate SDK header. It is + * intended only to stub the features of this header required by compiler-rt. 
+ * + * ===-----------------------------------------------------------------------=== + */ + +#include <sys/fcntl.h> diff --git a/SDKs/linux/usr/include/stdio.h b/SDKs/linux/usr/include/stdio.h index 7c258d2aca17..fba593640c36 100644 --- a/SDKs/linux/usr/include/stdio.h +++ b/SDKs/linux/usr/include/stdio.h @@ -33,6 +33,7 @@ extern struct _IO_FILE *stderr; extern int fclose(FILE *); extern int fflush(FILE *); extern FILE *fopen(const char * restrict, const char * restrict); +extern FILE *fdopen(int, const char * restrict); extern int fprintf(FILE * restrict, const char * restrict, ...); extern size_t fwrite(const void * restrict, size_t, size_t, FILE * restrict); extern size_t fread(void * restrict, size_t, size_t, FILE * restrict); diff --git a/SDKs/linux/usr/include/stdlib.h b/SDKs/linux/usr/include/stdlib.h index 2a6617ae3cf1..966b29db6e10 100644 --- a/SDKs/linux/usr/include/stdlib.h +++ b/SDKs/linux/usr/include/stdlib.h @@ -22,6 +22,7 @@ typedef __SIZE_TYPE__ size_t; void abort(void) __attribute__((__nothrow__)) __attribute__((__noreturn__)); +int atexit(void (*)(void)) __attribute__((__nothrow__)); int atoi(const char *) __attribute__((__nothrow__)); void free(void *) __attribute__((__nothrow__)); char *getenv(const char *) __attribute__((__nothrow__)) @@ -29,5 +30,7 @@ char *getenv(const char *) __attribute__((__nothrow__)) __attribute__((__warn_unused_result__)); void *malloc(size_t) __attribute__((__nothrow__)) __attribute((__malloc__)) __attribute__((__warn_unused_result__)); +void *realloc(void *, size_t) __attribute__((__nothrow__)) __attribute((__malloc__)) + __attribute__((__warn_unused_result__)); #endif /* __STDLIB_H__ */ diff --git a/SDKs/linux/usr/include/string.h b/SDKs/linux/usr/include/string.h index bee9d46cddc4..c7da1f57ba57 100644 --- a/SDKs/linux/usr/include/string.h +++ b/SDKs/linux/usr/include/string.h @@ -21,6 +21,7 @@ typedef __SIZE_TYPE__ size_t; int memcmp(const void *, const void *, size_t); void *memcpy(void *, const void *, size_t); +void *memset(void *, int, size_t); char *strcat(char *, const char *); char *strcpy(char *, const char *); char *strdup(const char *); diff --git a/SDKs/linux/usr/include/sys/fcntl.h b/SDKs/linux/usr/include/sys/fcntl.h new file mode 100644 index 000000000000..1512bf9b4e55 --- /dev/null +++ b/SDKs/linux/usr/include/sys/fcntl.h @@ -0,0 +1,29 @@ +/* ===-- fcntl.h - stub SDK header for compiler-rt --------------------------=== + * + * The LLVM Compiler Infrastructure + * + * This file is dual licensed under the MIT and the University of Illinois Open + * Source Licenses. See LICENSE.TXT for details. + * + * ===-----------------------------------------------------------------------=== + * + * This is a stub SDK header file. This file is not part of the interface of + * this library nor an official version of the appropriate SDK header. It is + * intended only to stub the features of this header required by compiler-rt. 
+ * + * ===-----------------------------------------------------------------------=== + */ + +#ifndef _SYS_FCNTL_H_ +#define _SYS_FCNTL_H_ + +#define O_RDONLY 0x0000 +#define O_WRONLY 0x0001 +#define O_RDWR 0x0002 +#define O_ACCMODE 0x0003 + +#define O_CREAT 0x0200 + +int open(const char *, int, ...); + +#endif /* _SYS_FCNTL_H_ */ diff --git a/SDKs/linux/usr/include/sys/mman.h b/SDKs/linux/usr/include/sys/mman.h index 7c4d05181f54..bfb7f8bb02de 100644 --- a/SDKs/linux/usr/include/sys/mman.h +++ b/SDKs/linux/usr/include/sys/mman.h @@ -19,10 +19,28 @@ typedef __SIZE_TYPE__ size_t; -#define PROT_READ 0x1 -#define PROT_WRITE 0x2 -#define PROT_EXEC 0x4 +#define PROT_NONE 0x00 +#define PROT_READ 0x01 +#define PROT_WRITE 0x02 +#define PROT_EXEC 0x04 +#define MAP_SHARED 0x0001 +#define MAP_PRIVATE 0x0002 + +#define MAP_FILE 0x0000 +#define MAP_ANON 0x1000 + +#define MS_ASYNC 0x0001 +#define MS_INVALIDATE 0x0002 +#define MS_SYNC 0x0010 + +extern void *mmap(void *addr, size_t len, int prot, int flags, int fd, + long long offset) + __attribute__((__nothrow__)); +extern int munmap(void *addr, size_t len) + __attribute__((__nothrow__)); +extern int msync(void *addr, size_t len, int flags) + __attribute__((__nothrow__)); extern int mprotect (void *__addr, size_t __len, int __prot) __attribute__((__nothrow__)); diff --git a/cmake/Modules/AddCompilerRT.cmake b/cmake/Modules/AddCompilerRT.cmake index e90253fdfa1e..bf114a401ef0 100644 --- a/cmake/Modules/AddCompilerRT.cmake +++ b/cmake/Modules/AddCompilerRT.cmake @@ -18,6 +18,92 @@ macro(add_compiler_rt_object_library name arch) endif() endmacro() +# Same as above, but adds universal osx library with name "<name>.osx" +# targeting multiple architectures. +# add_compiler_rt_osx_object_library(<name> ARCH <architectures> +# SOURCES <source files> +# CFLAGS <compile flags>) +macro(add_compiler_rt_osx_object_library name) + parse_arguments(LIB "ARCH;SOURCES;CFLAGS" "" ${ARGN}) + set(libname "${name}.osx") + add_library(${libname} OBJECT ${LIB_SOURCES}) + set_target_compile_flags(${libname} ${LIB_CFLAGS}) + set_target_properties(${libname} PROPERTIES OSX_ARCHITECTURES "${LIB_ARCH}") +endmacro() + +# Adds static runtime for a given architecture and puts it in the proper +# directory in the build and install trees. +# add_compiler_rt_static_runtime(<name> <arch> +# SOURCES <source files> +# CFLAGS <compile flags> +# DEFS <compile definitions> +# SYMS <symbols file>) +macro(add_compiler_rt_static_runtime name arch) + if(CAN_TARGET_${arch}) + parse_arguments(LIB "SOURCES;CFLAGS;DEFS;SYMS" "" ${ARGN}) + add_library(${name} STATIC ${LIB_SOURCES}) + # Setup compile flags and definitions. + set_target_compile_flags(${name} + ${TARGET_${arch}_CFLAGS} ${LIB_CFLAGS}) + set_property(TARGET ${name} APPEND PROPERTY + COMPILE_DEFINITIONS ${LIB_DEFS}) + # Setup correct output directory in the build tree. + set_target_properties(${name} PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}) + # Add installation command. + install(TARGETS ${name} + ARCHIVE DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}) + # Generate the .syms file if possible. + if(LIB_SYMS) + get_target_property(libfile ${name} LOCATION) + configure_file(${LIB_SYMS} ${libfile}.syms) + install(FILES ${libfile}.syms + DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}) + endif(LIB_SYMS) + else() + message(FATAL_ERROR "Archtecture ${arch} can't be targeted") + endif() +endmacro() + +# Same as add_compiler_rt_static_runtime, but creates a universal library +# for several architectures. 
+# add_compiler_rt_osx_static_runtime(<name> ARCH <architectures> +# SOURCES <source files> +# CFLAGS <compile flags> +# DEFS <compile definitions>) +macro(add_compiler_rt_osx_static_runtime name) + parse_arguments(LIB "ARCH;SOURCES;CFLAGS;DEFS" "" ${ARGN}) + add_library(${name} STATIC ${LIB_SOURCES}) + set_target_compile_flags(${name} ${LIB_CFLAGS}) + set_property(TARGET ${name} APPEND PROPERTY + COMPILE_DEFINITIONS ${LIB_DEFS}) + set_target_properties(${name} PROPERTIES + OSX_ARCHITECTURES "${LIB_ARCH}" + ARCHIVE_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}) + install(TARGETS ${name} + ARCHIVE DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}) +endmacro() + +# Adds dynamic runtime library on osx, which supports multiple architectures. +# add_compiler_rt_osx_dynamic_runtime(<name> ARCH <architectures> +# SOURCES <source files> +# CFLAGS <compile flags> +# DEFS <compile definitions> +# LINKFLAGS <link flags>) +macro(add_compiler_rt_osx_dynamic_runtime name) + parse_arguments(LIB "ARCH;SOURCES;CFLAGS;DEFS;LINKFLAGS" "" ${ARGN}) + add_library(${name} SHARED ${LIB_SOURCES}) + set_target_compile_flags(${name} ${LIB_CFLAGS}) + set_target_link_flags(${name} ${LIB_LINKFLAGS}) + set_property(TARGET ${name} APPEND PROPERTY + COMPILE_DEFINITIONS ${LIB_DEFS}) + set_target_properties(${name} PROPERTIES + OSX_ARCHITECTURES "${LIB_ARCH}" + LIBRARY_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}) + install(TARGETS ${name} + LIBRARY DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}) +endmacro() + # Unittests support. set(COMPILER_RT_GTEST_PATH ${LLVM_MAIN_SRC_DIR}/utils/unittest/googletest) set(COMPILER_RT_GTEST_SOURCE ${COMPILER_RT_GTEST_PATH}/gtest-all.cc) @@ -35,15 +121,21 @@ set(COMPILER_RT_GTEST_INCLUDE_CFLAGS # LINK_FLAGS <link flags>) macro(add_compiler_rt_test test_suite test_name) parse_arguments(TEST "OBJECTS;DEPS;LINK_FLAGS" "" ${ARGN}) - get_unittest_directory(OUTPUT_DIR) - file(MAKE_DIRECTORY ${OUTPUT_DIR}) - set(output_bin "${OUTPUT_DIR}/${test_name}") - add_custom_command( - OUTPUT ${output_bin} + set(output_bin "${CMAKE_CURRENT_BINARY_DIR}/${test_name}") + add_custom_target(${test_name} COMMAND clang ${TEST_OBJECTS} -o "${output_bin}" ${TEST_LINK_FLAGS} DEPENDS clang ${TEST_DEPS}) - add_custom_target(${test_name} DEPENDS ${output_bin}) # Make the test suite depend on the binary. add_dependencies(${test_suite} ${test_name}) endmacro() + +macro(add_compiler_rt_resource_file target_name file_name) + set(src_file "${CMAKE_CURRENT_SOURCE_DIR}/${file_name}") + set(dst_file "${CLANG_RESOURCE_DIR}/${file_name}") + add_custom_target(${target_name} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src_file} ${dst_file} + DEPENDS ${file_name}) + # Install in Clang resource directory. + install(FILES ${file_name} DESTINATION ${LIBCLANG_INSTALL_PATH}) +endmacro() diff --git a/cmake/Modules/CompilerRTUtils.cmake b/cmake/Modules/CompilerRTUtils.cmake index 50f068091e64..f9760f40dbd5 100644 --- a/cmake/Modules/CompilerRTUtils.cmake +++ b/cmake/Modules/CompilerRTUtils.cmake @@ -15,3 +15,14 @@ function(set_target_link_flags target) set_property(TARGET ${target} PROPERTY LINK_FLAGS "${argstring}") endfunction() +# Check if a given flag is present in a space-separated flag_string. +# Store the result in out_var. 
+function(find_flag_in_string flag_string flag out_var) + string(REPLACE " " ";" flag_list ${flag_string}) + list(FIND flag_list ${flag} flag_pos) + if(NOT flag_pos EQUAL -1) + set(${out_var} TRUE PARENT_SCOPE) + else() + set(${out_var} FALSE PARENT_SCOPE) + endif() +endfunction() diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt new file mode 100644 index 000000000000..700b5326b06c --- /dev/null +++ b/include/CMakeLists.txt @@ -0,0 +1,39 @@ +set(SANITIZER_HEADERS + sanitizer/asan_interface.h + sanitizer/common_interface_defs.h + sanitizer/linux_syscall_hooks.h + sanitizer/msan_interface.h) + +set(output_dir ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/include) + +if(MSVC_IDE OR XCODE) + set(other_output_dir ${LLVM_BINARY_DIR}/bin/lib/clang/${CLANG_VERSION}/include) +endif() + +# Copy compiler-rt headers to the build tree. +set(out_files) +foreach( f ${SANITIZER_HEADERS} ) + set( src ${CMAKE_CURRENT_SOURCE_DIR}/${f} ) + set( dst ${output_dir}/${f} ) + add_custom_command(OUTPUT ${dst} + DEPENDS ${src} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${dst} + COMMENT "Copying compiler-rt's ${f}...") + list(APPEND out_files ${dst}) + + if(other_output_dir) + set(other_dst ${other_output_dir}/${f}) + add_custom_command(OUTPUT ${other_dst} + DEPENDS ${src} + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${src} ${other_dst} + COMMENT "Copying compiler-rt's ${f}...") + list(APPEND out_files ${other_dst}) + endif() +endforeach( f ) + +add_custom_target(compiler-rt-headers ALL DEPENDS ${out_files}) + +# Install sanitizer headers. +install(FILES ${SANITIZER_HEADERS} + PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ + DESTINATION ${LIBCLANG_INSTALL_PATH}/include/sanitizer) diff --git a/include/sanitizer/asan_interface.h b/include/sanitizer/asan_interface.h index 6afc3800f4e7..8adf3f17f24b 100644 --- a/include/sanitizer/asan_interface.h +++ b/include/sanitizer/asan_interface.h @@ -7,69 +7,18 @@ // //===----------------------------------------------------------------------===// // -// This file is a part of AddressSanitizer, an address sanity checker. +// This file is a part of AddressSanitizer. // -// This header can be included by the instrumented program to fetch -// data (mostly allocator statistics) from ASan runtime library. +// Public interface header. //===----------------------------------------------------------------------===// #ifndef SANITIZER_ASAN_INTERFACE_H #define SANITIZER_ASAN_INTERFACE_H #include <sanitizer/common_interface_defs.h> -// ----------- ATTENTION ------------- -// This header should NOT include any other headers from ASan runtime. -// All functions in this header are extern "C" and start with __asan_. - -using __sanitizer::uptr; - +#ifdef __cplusplus extern "C" { - // This function should be called at the very beginning of the process, - // before any instrumented code is executed and before any call to malloc. - void __asan_init() SANITIZER_INTERFACE_ATTRIBUTE; - - // This structure describes an instrumented global variable. - struct __asan_global { - uptr beg; // The address of the global. - uptr size; // The original size of the global. - uptr size_with_redzone; // The size with the redzone. - const char *name; // Name as a C string. - uptr has_dynamic_init; // Non-zero if the global has dynamic initializer. - }; - - // These two functions should be called by the instrumented code. - // 'globals' is an array of structures describing 'n' globals. 
- void __asan_register_globals(__asan_global *globals, uptr n) - SANITIZER_INTERFACE_ATTRIBUTE; - void __asan_unregister_globals(__asan_global *globals, uptr n) - SANITIZER_INTERFACE_ATTRIBUTE; - - // These two functions should be called before and after dynamic initializers - // run, respectively. They should be called with parameters describing all - // dynamically initialized globals defined in the calling TU. - void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) - SANITIZER_INTERFACE_ATTRIBUTE; - void __asan_after_dynamic_init() - SANITIZER_INTERFACE_ATTRIBUTE; - - // These two functions are used by the instrumented code in the - // use-after-return mode. __asan_stack_malloc allocates size bytes of - // fake stack and __asan_stack_free poisons it. real_stack is a pointer to - // the real stack region. - uptr __asan_stack_malloc(uptr size, uptr real_stack) - SANITIZER_INTERFACE_ATTRIBUTE; - void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) - SANITIZER_INTERFACE_ATTRIBUTE; - - // These two functions are used by instrumented code in the - // use-after-scope mode. They mark memory for local variables as - // unaddressable when they leave scope and addressable before the - // function exits. - void __asan_poison_stack_memory(uptr addr, uptr size) - SANITIZER_INTERFACE_ATTRIBUTE; - void __asan_unpoison_stack_memory(uptr addr, uptr size) - SANITIZER_INTERFACE_ATTRIBUTE; - +#endif // Marks memory region [addr, addr+size) as unaddressable. // This memory must be previously allocated by the user program. Accessing // addresses in this region from instrumented code is forbidden until @@ -78,8 +27,7 @@ extern "C" { // to ASan alignment restrictions. // Method is NOT thread-safe in the sense that no two threads can // (un)poison memory in the same memory region simultaneously. - void __asan_poison_memory_region(void const volatile *addr, uptr size) - SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_poison_memory_region(void const volatile *addr, size_t size); // Marks memory region [addr, addr+size) as addressable. // This memory must be previously allocated by the user program. Accessing // addresses in this region is allowed until this region is poisoned again. @@ -87,15 +35,10 @@ extern "C" { // ASan alignment restrictions. // Method is NOT thread-safe in the sense that no two threads can // (un)poison memory in the same memory region simultaneously. - void __asan_unpoison_memory_region(void const volatile *addr, uptr size) - SANITIZER_INTERFACE_ATTRIBUTE; - - // Performs cleanup before a NoReturn function. Must be called before things - // like _exit and execl to avoid false positives on stack. - void __asan_handle_no_return() SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_unpoison_memory_region(void const volatile *addr, size_t size); -// User code should use macro instead of functions. -#if __has_feature(address_sanitizer) +// User code should use macros instead of functions. +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) #define ASAN_POISON_MEMORY_REGION(addr, size) \ __asan_poison_memory_region((addr), (size)) #define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ @@ -109,104 +52,86 @@ extern "C" { // Returns true iff addr is poisoned (i.e. 1-byte read/write access to this // address will result in error report from AddressSanitizer). 
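
The manual poisoning entry points kept in this slimmed-down public header (__asan_poison_memory_region, __asan_unpoison_memory_region, and the ASAN_POISON_MEMORY_REGION / ASAN_UNPOISON_MEMORY_REGION macros above) let user code teach AddressSanitizer about red zones in custom allocators and containers. A minimal sketch of that usage, assuming a build with -fsanitize=address; the buffer size and the 64-byte live prefix are illustrative:

```c
/* Poison the unused tail of an allocation so stray accesses are reported.
 * Build with: clang -fsanitize=address poison_demo.c */
#include <sanitizer/asan_interface.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  char *buf = malloc(128);
  if (!buf) return 1;
  /* Only the first 64 bytes are considered live; mark the rest off-limits. */
  ASAN_POISON_MEMORY_REGION(buf + 64, 64);
  memset(buf, 0, 64);        /* fine: this range is still addressable */
  /* buf[100] = 1;              would now trigger an ASan report      */
  ASAN_UNPOISON_MEMORY_REGION(buf + 64, 64);  /* unpoison before reuse/free */
  free(buf);
  return 0;
}
```

As the header advises, the macro forms are preferred over the raw functions because they compile away when the program is built without AddressSanitizer.
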
- bool __asan_address_is_poisoned(void const volatile *addr) - SANITIZER_INTERFACE_ATTRIBUTE; + bool __asan_address_is_poisoned(void const volatile *addr); // If at least on byte in [beg, beg+size) is poisoned, return the address // of the first such byte. Otherwise return 0. - uptr __asan_region_is_poisoned(uptr beg, uptr size) - SANITIZER_INTERFACE_ATTRIBUTE; + void *__asan_region_is_poisoned(void *beg, size_t size); // Print the description of addr (useful when debugging in gdb). - void __asan_describe_address(uptr addr) - SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_describe_address(void *addr); // This is an internal function that is called to report an error. // However it is still a part of the interface because users may want to // set a breakpoint on this function in a debugger. - void __asan_report_error(uptr pc, uptr bp, uptr sp, - uptr addr, bool is_write, uptr access_size) - SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_report_error(void *pc, void *bp, void *sp, + void *addr, bool is_write, size_t access_size); // Sets the exit code to use when reporting an error. // Returns the old value. - int __asan_set_error_exit_code(int exit_code) - SANITIZER_INTERFACE_ATTRIBUTE; + int __asan_set_error_exit_code(int exit_code); // Sets the callback to be called right before death on error. // Passing 0 will unset the callback. - void __asan_set_death_callback(void (*callback)(void)) - SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_set_death_callback(void (*callback)(void)); - void __asan_set_error_report_callback(void (*callback)(const char*)) - SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_set_error_report_callback(void (*callback)(const char*)); // User may provide function that would be called right when ASan detects // an error. This can be used to notice cases when ASan detects an error, but // the program crashes before ASan report is printed. - /* OPTIONAL */ void __asan_on_error() - SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_on_error(); // User may provide its own implementation for symbolization function. // It should print the description of instruction at address "pc" to // "out_buffer". Description should be at most "out_size" bytes long. // User-specified function should return true if symbolization was // successful. - /* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer, - int out_size) - SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + bool __asan_symbolize(const void *pc, char *out_buffer, + int out_size); // Returns the estimated number of bytes that will be reserved by allocator // for request of "size" bytes. If ASan allocator can't allocate that much // memory, returns the maximal possible allocation size, otherwise returns // "size". - uptr __asan_get_estimated_allocated_size(uptr size) - SANITIZER_INTERFACE_ATTRIBUTE; + size_t __asan_get_estimated_allocated_size(size_t size); // Returns true if p was returned by the ASan allocator and // is not yet freed. - bool __asan_get_ownership(const void *p) - SANITIZER_INTERFACE_ATTRIBUTE; + bool __asan_get_ownership(const void *p); // Returns the number of bytes reserved for the pointer p. // Requires (get_ownership(p) == true) or (p == 0). - uptr __asan_get_allocated_size(const void *p) - SANITIZER_INTERFACE_ATTRIBUTE; + size_t __asan_get_allocated_size(const void *p); // Number of bytes, allocated and not yet freed by the application. 
- uptr __asan_get_current_allocated_bytes() - SANITIZER_INTERFACE_ATTRIBUTE; + size_t __asan_get_current_allocated_bytes(); // Number of bytes, mmaped by asan allocator to fulfill allocation requests. // Generally, for request of X bytes, allocator can reserve and add to free // lists a large number of chunks of size X to use them for future requests. // All these chunks count toward the heap size. Currently, allocator never // releases memory to OS (instead, it just puts freed chunks to free lists). - uptr __asan_get_heap_size() - SANITIZER_INTERFACE_ATTRIBUTE; + size_t __asan_get_heap_size(); // Number of bytes, mmaped by asan allocator, which can be used to fulfill // allocation requests. When a user program frees memory chunk, it can first // fall into quarantine and will count toward __asan_get_free_bytes() later. - uptr __asan_get_free_bytes() - SANITIZER_INTERFACE_ATTRIBUTE; + size_t __asan_get_free_bytes(); // Number of bytes in unmapped pages, that are released to OS. Currently, // always returns 0. - uptr __asan_get_unmapped_bytes() - SANITIZER_INTERFACE_ATTRIBUTE; + size_t __asan_get_unmapped_bytes(); // Prints accumulated stats to stderr. Used for debugging. - void __asan_print_accumulated_stats() - SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_print_accumulated_stats(); // This function may be optionally provided by user and should return // a string containing ASan runtime options. See asan_flags.h for details. - /* OPTIONAL */ const char* __asan_default_options() - SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + const char* __asan_default_options(); // Malloc hooks that may be optionally provided by user. // __asan_malloc_hook(ptr, size) is called immediately after // allocation of "size" bytes, which returned "ptr". // __asan_free_hook(ptr) is called immediately before // deallocation of "ptr". - /* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size) - SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; - /* OPTIONAL */ void __asan_free_hook(void *ptr) - SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_malloc_hook(void *ptr, size_t size); + void __asan_free_hook(void *ptr); +#ifdef __cplusplus } // extern "C" +#endif #endif // SANITIZER_ASAN_INTERFACE_H diff --git a/include/sanitizer/common_interface_defs.h b/include/sanitizer/common_interface_defs.h index 9d8fa5582b67..31d0dea5484b 100644 --- a/include/sanitizer/common_interface_defs.h +++ b/include/sanitizer/common_interface_defs.h @@ -7,86 +7,52 @@ // //===----------------------------------------------------------------------===// // -// This file is shared between AddressSanitizer and ThreadSanitizer. -// It contains basic macro and types. -// NOTE: This file may be included into user code. +// Common part of the public sanitizer interface. //===----------------------------------------------------------------------===// #ifndef SANITIZER_COMMON_INTERFACE_DEFS_H #define SANITIZER_COMMON_INTERFACE_DEFS_H -// ----------- ATTENTION ------------- -// This header should NOT include any other headers to avoid portability issues. +#include <stddef.h> +#include <stdint.h> -#if defined(_WIN32) -// FIXME find out what we need on Windows. __declspec(dllexport) ? 
-# define SANITIZER_INTERFACE_ATTRIBUTE -# define SANITIZER_WEAK_ATTRIBUTE -#elif defined(SANITIZER_GO) -# define SANITIZER_INTERFACE_ATTRIBUTE -# define SANITIZER_WEAK_ATTRIBUTE -#else -# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default"))) -# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak)) -#endif - -#ifdef __linux__ -# define SANITIZER_SUPPORTS_WEAK_HOOKS 1 -#else -# define SANITIZER_SUPPORTS_WEAK_HOOKS 0 -#endif - -// __has_feature +// GCC does not understand __has_feature. #if !defined(__has_feature) # define __has_feature(x) 0 #endif -// For portability reasons we do not include stddef.h, stdint.h or any other -// system header, but we do need some basic types that are not defined -// in a portable way by the language itself. -namespace __sanitizer { - -#if defined(_WIN64) -// 64-bit Windows uses LLP64 data model. -typedef unsigned long long uptr; // NOLINT -typedef signed long long sptr; // NOLINT -#else -typedef unsigned long uptr; // NOLINT -typedef signed long sptr; // NOLINT -#endif // defined(_WIN64) -#if defined(__x86_64__) -// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use -// 64-bit pointer to unwind stack frame. -typedef unsigned long long uhwptr; // NOLINT -#else -typedef uptr uhwptr; // NOLINT -#endif -typedef unsigned char u8; -typedef unsigned short u16; // NOLINT -typedef unsigned int u32; -typedef unsigned long long u64; // NOLINT -typedef signed char s8; -typedef signed short s16; // NOLINT -typedef signed int s32; -typedef signed long long s64; // NOLINT - -} // namespace __sanitizer - +#ifdef __cplusplus extern "C" { +#endif // Tell the tools to write their reports to "path.<pid>" instead of stderr. - void __sanitizer_set_report_path(const char *path) - SANITIZER_INTERFACE_ATTRIBUTE; + void __sanitizer_set_report_path(const char *path); // Tell the tools to write their reports to given file descriptor instead of // stderr. - void __sanitizer_set_report_fd(int fd) - SANITIZER_INTERFACE_ATTRIBUTE; + void __sanitizer_set_report_fd(int fd); // Notify the tools that the sandbox is going to be turned on. The reserved // parameter will be used in the future to hold a structure with functions // that the tools may call to bypass the sandbox. - void __sanitizer_sandbox_on_notify(void *reserved) - SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + void __sanitizer_sandbox_on_notify(void *reserved); + + // This function is called by the tool when it has just finished reporting + // an error. 'error_summary' is a one-line string that summarizes + // the error message. This function can be overridden by the client. + void __sanitizer_report_error_summary(const char *error_summary); + + // Some of the sanitizers (e.g. asan/tsan) may miss bugs that happen + // in unaligned loads/stores. In order to find such bugs reliably one needs + // to replace plain unaligned loads/stores with these calls. 
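
The unaligned load and store helpers declared just below exist because, as the comment notes, the sanitizers may miss bugs hidden in plain unaligned accesses; deliberate unaligned reads and writes should be routed through these calls instead. A minimal sketch of that substitution, with an illustrative packet layout and offset:

```c
#include <sanitizer/common_interface_defs.h>
#include <stdint.h>

/* Read a 32-bit field that sits at an odd, unaligned offset in a byte
 * buffer.  A raw pointer cast would do the same load, but the sanitizer
 * may not check it; the helper performs the load visibly to the tool. */
uint32_t read_sequence_field(const unsigned char *packet) {
  /* return *(const uint32_t *)(packet + 1);   <- plain, possibly unchecked */
  return __sanitizer_unaligned_load32(packet + 1);
}
```
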
+ uint16_t __sanitizer_unaligned_load16(const void *p); + uint32_t __sanitizer_unaligned_load32(const void *p); + uint64_t __sanitizer_unaligned_load64(const void *p); + void __sanitizer_unaligned_store16(void *p, uint16_t x); + void __sanitizer_unaligned_store32(void *p, uint32_t x); + void __sanitizer_unaligned_store64(void *p, uint64_t x); + +#ifdef __cplusplus } // extern "C" +#endif #endif // SANITIZER_COMMON_INTERFACE_DEFS_H diff --git a/include/sanitizer/linux_syscall_hooks.h b/include/sanitizer/linux_syscall_hooks.h new file mode 100644 index 000000000000..894d5c2bebff --- /dev/null +++ b/include/sanitizer/linux_syscall_hooks.h @@ -0,0 +1,802 @@ +//===-- linux_syscall_hooks.h ---------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of public sanitizer interface. +// +// System call handlers. +// +// Interface methods declared in this header implement pre- and post- syscall +// actions for the active sanitizer. +// Usage: +// __sanitizer_syscall_pre_getfoo(...args...); +// int res = syscall(__NR_getfoo, ...args...); +// __sanitizer_syscall_post_getfoo(res, ...args...); +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_LINUX_SYSCALL_HOOKS_H +#define SANITIZER_LINUX_SYSCALL_HOOKS_H + +#ifdef __cplusplus +extern "C" { +#endif + +void __sanitizer_syscall_pre_rt_sigpending(void *p, size_t s); +void __sanitizer_syscall_pre_getdents(int fd, void *dirp, int count); +void __sanitizer_syscall_pre_getdents64(int fd, void *dirp, int count); +void __sanitizer_syscall_pre_recvmsg(int sockfd, void *msg, int flags); +void __sanitizer_syscall_pre_wait4(int pid, int *status, int options, void *r); +void __sanitizer_syscall_pre_waitpid(int pid, int *status, int options); + +void __sanitizer_syscall_post_rt_sigpending(long res, void *p, size_t s); +void __sanitizer_syscall_post_getdents(long res, int fd, void *dirp, int count); +void __sanitizer_syscall_post_getdents64(long res, int fd, void *dirp, + int count); +void __sanitizer_syscall_post_recvmsg(long res, int sockfd, void *msg, + int flags); +void __sanitizer_syscall_post_wait4(long res, int pid, int *status, int options, + void *r); +void __sanitizer_syscall_post_waitpid(long res, int pid, int *status, + int options); + +// And now a few syscalls we don't handle yet. + +#define __sanitizer_syscall_pre_accept(...) +#define __sanitizer_syscall_pre_accept4(...) +#define __sanitizer_syscall_pre_access(...) +#define __sanitizer_syscall_pre_acct(...) +#define __sanitizer_syscall_pre_add_key(...) +#define __sanitizer_syscall_pre_adjtimex(...) +#define __sanitizer_syscall_pre_afs_syscall(...) +#define __sanitizer_syscall_pre_alarm(...) +#define __sanitizer_syscall_pre_arch_prctl(...) +#define __sanitizer_syscall_pre_bdflush(...) +#define __sanitizer_syscall_pre_bind(...) +#define __sanitizer_syscall_pre_break(...) +#define __sanitizer_syscall_pre_brk(...) +#define __sanitizer_syscall_pre_capget(...) +#define __sanitizer_syscall_pre_capset(...) +#define __sanitizer_syscall_pre_chdir(...) +#define __sanitizer_syscall_pre_chmod(...) +#define __sanitizer_syscall_pre_chown(...) +#define __sanitizer_syscall_pre_chown32(...) +#define __sanitizer_syscall_pre_chroot(...) +#define __sanitizer_syscall_pre_clock_adjtime(...) 
+#define __sanitizer_syscall_pre_clock_getres(...) +#define __sanitizer_syscall_pre_clock_gettime(...) +#define __sanitizer_syscall_pre_clock_nanosleep(...) +#define __sanitizer_syscall_pre_clock_settime(...) +#define __sanitizer_syscall_pre_clone(...) +#define __sanitizer_syscall_pre_close(...) +#define __sanitizer_syscall_pre_connect(...) +#define __sanitizer_syscall_pre_creat(...) +#define __sanitizer_syscall_pre_create_module(...) +#define __sanitizer_syscall_pre_delete_module(...) +#define __sanitizer_syscall_pre_dup(...) +#define __sanitizer_syscall_pre_dup2(...) +#define __sanitizer_syscall_pre_dup3(...) +#define __sanitizer_syscall_pre_epoll_create(...) +#define __sanitizer_syscall_pre_epoll_create1(...) +#define __sanitizer_syscall_pre_epoll_ctl(...) +#define __sanitizer_syscall_pre_epoll_ctl_old(...) +#define __sanitizer_syscall_pre_epoll_pwait(...) +#define __sanitizer_syscall_pre_epoll_wait(...) +#define __sanitizer_syscall_pre_epoll_wait_old(...) +#define __sanitizer_syscall_pre_eventfd(...) +#define __sanitizer_syscall_pre_eventfd2(...) +#define __sanitizer_syscall_pre_execve(...) +#define __sanitizer_syscall_pre_exit(...) +#define __sanitizer_syscall_pre_exit_group(...) +#define __sanitizer_syscall_pre_faccessat(...) +#define __sanitizer_syscall_pre_fadvise64(...) +#define __sanitizer_syscall_pre_fadvise64_64(...) +#define __sanitizer_syscall_pre_fallocate(...) +#define __sanitizer_syscall_pre_fanotify_init(...) +#define __sanitizer_syscall_pre_fanotify_mark(...) +#define __sanitizer_syscall_pre_fchdir(...) +#define __sanitizer_syscall_pre_fchmod(...) +#define __sanitizer_syscall_pre_fchmodat(...) +#define __sanitizer_syscall_pre_fchown(...) +#define __sanitizer_syscall_pre_fchown32(...) +#define __sanitizer_syscall_pre_fchownat(...) +#define __sanitizer_syscall_pre_fcntl(...) +#define __sanitizer_syscall_pre_fcntl64(...) +#define __sanitizer_syscall_pre_fdatasync(...) +#define __sanitizer_syscall_pre_fgetxattr(...) +#define __sanitizer_syscall_pre_flistxattr(...) +#define __sanitizer_syscall_pre_flock(...) +#define __sanitizer_syscall_pre_fork(...) +#define __sanitizer_syscall_pre_fremovexattr(...) +#define __sanitizer_syscall_pre_fsetxattr(...) +#define __sanitizer_syscall_pre_fstat(...) +#define __sanitizer_syscall_pre_fstat64(...) +#define __sanitizer_syscall_pre_fstatat64(...) +#define __sanitizer_syscall_pre_fstatfs(...) +#define __sanitizer_syscall_pre_fstatfs64(...) +#define __sanitizer_syscall_pre_fsync(...) +#define __sanitizer_syscall_pre_ftime(...) +#define __sanitizer_syscall_pre_ftruncate(...) +#define __sanitizer_syscall_pre_ftruncate64(...) +#define __sanitizer_syscall_pre_futex(...) +#define __sanitizer_syscall_pre_futimesat(...) +#define __sanitizer_syscall_pre_getcpu(...) +#define __sanitizer_syscall_pre_getcwd(...) +#define __sanitizer_syscall_pre_getegid(...) +#define __sanitizer_syscall_pre_getegid32(...) +#define __sanitizer_syscall_pre_geteuid(...) +#define __sanitizer_syscall_pre_geteuid32(...) +#define __sanitizer_syscall_pre_getgid(...) +#define __sanitizer_syscall_pre_getgid32(...) +#define __sanitizer_syscall_pre_getgroups(...) +#define __sanitizer_syscall_pre_getgroups32(...) +#define __sanitizer_syscall_pre_getitimer(...) +#define __sanitizer_syscall_pre_get_kernel_syms(...) +#define __sanitizer_syscall_pre_get_mempolicy(...) +#define __sanitizer_syscall_pre_getpeername(...) +#define __sanitizer_syscall_pre_getpgid(...) +#define __sanitizer_syscall_pre_getpgrp(...) +#define __sanitizer_syscall_pre_getpid(...) 
+#define __sanitizer_syscall_pre_getpmsg(...) +#define __sanitizer_syscall_pre_getppid(...) +#define __sanitizer_syscall_pre_getpriority(...) +#define __sanitizer_syscall_pre_getresgid(...) +#define __sanitizer_syscall_pre_getresgid32(...) +#define __sanitizer_syscall_pre_getresuid(...) +#define __sanitizer_syscall_pre_getresuid32(...) +#define __sanitizer_syscall_pre_getrlimit(...) +#define __sanitizer_syscall_pre_get_robust_list(...) +#define __sanitizer_syscall_pre_getrusage(...) +#define __sanitizer_syscall_pre_getsid(...) +#define __sanitizer_syscall_pre_getsockname(...) +#define __sanitizer_syscall_pre_getsockopt(...) +#define __sanitizer_syscall_pre_get_thread_area(...) +#define __sanitizer_syscall_pre_gettid(...) +#define __sanitizer_syscall_pre_gettimeofday(...) +#define __sanitizer_syscall_pre_getuid(...) +#define __sanitizer_syscall_pre_getuid32(...) +#define __sanitizer_syscall_pre_getxattr(...) +#define __sanitizer_syscall_pre_gtty(...) +#define __sanitizer_syscall_pre_idle(...) +#define __sanitizer_syscall_pre_init_module(...) +#define __sanitizer_syscall_pre_inotify_add_watch(...) +#define __sanitizer_syscall_pre_inotify_init(...) +#define __sanitizer_syscall_pre_inotify_init1(...) +#define __sanitizer_syscall_pre_inotify_rm_watch(...) +#define __sanitizer_syscall_pre_io_cancel(...) +#define __sanitizer_syscall_pre_ioctl(...) +#define __sanitizer_syscall_pre_io_destroy(...) +#define __sanitizer_syscall_pre_io_getevents(...) +#define __sanitizer_syscall_pre_ioperm(...) +#define __sanitizer_syscall_pre_iopl(...) +#define __sanitizer_syscall_pre_ioprio_get(...) +#define __sanitizer_syscall_pre_ioprio_set(...) +#define __sanitizer_syscall_pre_io_setup(...) +#define __sanitizer_syscall_pre_io_submit(...) +#define __sanitizer_syscall_pre_ipc(...) +#define __sanitizer_syscall_pre_kexec_load(...) +#define __sanitizer_syscall_pre_keyctl(...) +#define __sanitizer_syscall_pre_kill(...) +#define __sanitizer_syscall_pre_lchown(...) +#define __sanitizer_syscall_pre_lchown32(...) +#define __sanitizer_syscall_pre_lgetxattr(...) +#define __sanitizer_syscall_pre_link(...) +#define __sanitizer_syscall_pre_linkat(...) +#define __sanitizer_syscall_pre_listen(...) +#define __sanitizer_syscall_pre_listxattr(...) +#define __sanitizer_syscall_pre_llistxattr(...) +#define __sanitizer_syscall_pre__llseek(...) +#define __sanitizer_syscall_pre_lock(...) +#define __sanitizer_syscall_pre_lookup_dcookie(...) +#define __sanitizer_syscall_pre_lremovexattr(...) +#define __sanitizer_syscall_pre_lseek(...) +#define __sanitizer_syscall_pre_lsetxattr(...) +#define __sanitizer_syscall_pre_lstat(...) +#define __sanitizer_syscall_pre_lstat64(...) +#define __sanitizer_syscall_pre_madvise(...) +#define __sanitizer_syscall_pre_madvise1(...) +#define __sanitizer_syscall_pre_mbind(...) +#define __sanitizer_syscall_pre_migrate_pages(...) +#define __sanitizer_syscall_pre_mincore(...) +#define __sanitizer_syscall_pre_mkdir(...) +#define __sanitizer_syscall_pre_mkdirat(...) +#define __sanitizer_syscall_pre_mknod(...) +#define __sanitizer_syscall_pre_mknodat(...) +#define __sanitizer_syscall_pre_mlock(...) +#define __sanitizer_syscall_pre_mlockall(...) +#define __sanitizer_syscall_pre_mmap(...) +#define __sanitizer_syscall_pre_mmap2(...) +#define __sanitizer_syscall_pre_modify_ldt(...) +#define __sanitizer_syscall_pre_mount(...) +#define __sanitizer_syscall_pre_move_pages(...) +#define __sanitizer_syscall_pre_mprotect(...) +#define __sanitizer_syscall_pre_mpx(...) +#define __sanitizer_syscall_pre_mq_getsetattr(...) 
+#define __sanitizer_syscall_pre_mq_notify(...) +#define __sanitizer_syscall_pre_mq_open(...) +#define __sanitizer_syscall_pre_mq_timedreceive(...) +#define __sanitizer_syscall_pre_mq_timedsend(...) +#define __sanitizer_syscall_pre_mq_unlink(...) +#define __sanitizer_syscall_pre_mremap(...) +#define __sanitizer_syscall_pre_msgctl(...) +#define __sanitizer_syscall_pre_msgget(...) +#define __sanitizer_syscall_pre_msgrcv(...) +#define __sanitizer_syscall_pre_msgsnd(...) +#define __sanitizer_syscall_pre_msync(...) +#define __sanitizer_syscall_pre_munlock(...) +#define __sanitizer_syscall_pre_munlockall(...) +#define __sanitizer_syscall_pre_munmap(...) +#define __sanitizer_syscall_pre_name_to_handle_at(...) +#define __sanitizer_syscall_pre_nanosleep(...) +#define __sanitizer_syscall_pre_newfstatat(...) +#define __sanitizer_syscall_pre__newselect(...) +#define __sanitizer_syscall_pre_nfsservctl(...) +#define __sanitizer_syscall_pre_nice(...) +#define __sanitizer_syscall_pre_oldfstat(...) +#define __sanitizer_syscall_pre_oldlstat(...) +#define __sanitizer_syscall_pre_oldolduname(...) +#define __sanitizer_syscall_pre_oldstat(...) +#define __sanitizer_syscall_pre_olduname(...) +#define __sanitizer_syscall_pre_open(...) +#define __sanitizer_syscall_pre_openat(...) +#define __sanitizer_syscall_pre_open_by_handle_at(...) +#define __sanitizer_syscall_pre_pause(...) +#define __sanitizer_syscall_pre_perf_event_open(...) +#define __sanitizer_syscall_pre_personality(...) +#define __sanitizer_syscall_pre_pipe(...) +#define __sanitizer_syscall_pre_pipe2(...) +#define __sanitizer_syscall_pre_pivot_root(...) +#define __sanitizer_syscall_pre_poll(...) +#define __sanitizer_syscall_pre_ppoll(...) +#define __sanitizer_syscall_pre_prctl(...) +#define __sanitizer_syscall_pre_pread64(...) +#define __sanitizer_syscall_pre_preadv(...) +#define __sanitizer_syscall_pre_prlimit64(...) +#define __sanitizer_syscall_pre_process_vm_readv(...) +#define __sanitizer_syscall_pre_process_vm_writev(...) +#define __sanitizer_syscall_pre_prof(...) +#define __sanitizer_syscall_pre_profil(...) +#define __sanitizer_syscall_pre_pselect6(...) +#define __sanitizer_syscall_pre_ptrace(...) +#define __sanitizer_syscall_pre_putpmsg(...) +#define __sanitizer_syscall_pre_pwrite64(...) +#define __sanitizer_syscall_pre_pwritev(...) +#define __sanitizer_syscall_pre_query_module(...) +#define __sanitizer_syscall_pre_quotactl(...) +#define __sanitizer_syscall_pre_read(...) +#define __sanitizer_syscall_pre_readahead(...) +#define __sanitizer_syscall_pre_readdir(...) +#define __sanitizer_syscall_pre_readlink(...) +#define __sanitizer_syscall_pre_readlinkat(...) +#define __sanitizer_syscall_pre_readv(...) +#define __sanitizer_syscall_pre_reboot(...) +#define __sanitizer_syscall_pre_recvfrom(...) +#define __sanitizer_syscall_pre_recvmmsg(...) +#define __sanitizer_syscall_pre_remap_file_pages(...) +#define __sanitizer_syscall_pre_removexattr(...) +#define __sanitizer_syscall_pre_rename(...) +#define __sanitizer_syscall_pre_renameat(...) +#define __sanitizer_syscall_pre_request_key(...) +#define __sanitizer_syscall_pre_restart_syscall(...) +#define __sanitizer_syscall_pre_rmdir(...) +#define __sanitizer_syscall_pre_rt_sigaction(...) +#define __sanitizer_syscall_pre_rt_sigprocmask(...) +#define __sanitizer_syscall_pre_rt_sigqueueinfo(...) +#define __sanitizer_syscall_pre_rt_sigreturn(...) +#define __sanitizer_syscall_pre_rt_sigsuspend(...) +#define __sanitizer_syscall_pre_rt_sigtimedwait(...) +#define __sanitizer_syscall_pre_rt_tgsigqueueinfo(...) 
+#define __sanitizer_syscall_pre_sched_getaffinity(...) +#define __sanitizer_syscall_pre_sched_getparam(...) +#define __sanitizer_syscall_pre_sched_get_priority_max(...) +#define __sanitizer_syscall_pre_sched_get_priority_min(...) +#define __sanitizer_syscall_pre_sched_getscheduler(...) +#define __sanitizer_syscall_pre_sched_rr_get_interval(...) +#define __sanitizer_syscall_pre_sched_setaffinity(...) +#define __sanitizer_syscall_pre_sched_setparam(...) +#define __sanitizer_syscall_pre_sched_setscheduler(...) +#define __sanitizer_syscall_pre_sched_yield(...) +#define __sanitizer_syscall_pre_security(...) +#define __sanitizer_syscall_pre_select(...) +#define __sanitizer_syscall_pre_semctl(...) +#define __sanitizer_syscall_pre_semget(...) +#define __sanitizer_syscall_pre_semop(...) +#define __sanitizer_syscall_pre_semtimedop(...) +#define __sanitizer_syscall_pre_sendfile(...) +#define __sanitizer_syscall_pre_sendfile64(...) +#define __sanitizer_syscall_pre_sendmmsg(...) +#define __sanitizer_syscall_pre_sendmsg(...) +#define __sanitizer_syscall_pre_sendto(...) +#define __sanitizer_syscall_pre_setdomainname(...) +#define __sanitizer_syscall_pre_setfsgid(...) +#define __sanitizer_syscall_pre_setfsgid32(...) +#define __sanitizer_syscall_pre_setfsuid(...) +#define __sanitizer_syscall_pre_setfsuid32(...) +#define __sanitizer_syscall_pre_setgid(...) +#define __sanitizer_syscall_pre_setgid32(...) +#define __sanitizer_syscall_pre_setgroups(...) +#define __sanitizer_syscall_pre_setgroups32(...) +#define __sanitizer_syscall_pre_sethostname(...) +#define __sanitizer_syscall_pre_setitimer(...) +#define __sanitizer_syscall_pre_set_mempolicy(...) +#define __sanitizer_syscall_pre_setns(...) +#define __sanitizer_syscall_pre_setpgid(...) +#define __sanitizer_syscall_pre_setpriority(...) +#define __sanitizer_syscall_pre_setregid(...) +#define __sanitizer_syscall_pre_setregid32(...) +#define __sanitizer_syscall_pre_setresgid(...) +#define __sanitizer_syscall_pre_setresgid32(...) +#define __sanitizer_syscall_pre_setresuid(...) +#define __sanitizer_syscall_pre_setresuid32(...) +#define __sanitizer_syscall_pre_setreuid(...) +#define __sanitizer_syscall_pre_setreuid32(...) +#define __sanitizer_syscall_pre_setrlimit(...) +#define __sanitizer_syscall_pre_set_robust_list(...) +#define __sanitizer_syscall_pre_setsid(...) +#define __sanitizer_syscall_pre_setsockopt(...) +#define __sanitizer_syscall_pre_set_thread_area(...) +#define __sanitizer_syscall_pre_set_tid_address(...) +#define __sanitizer_syscall_pre_settimeofday(...) +#define __sanitizer_syscall_pre_setuid(...) +#define __sanitizer_syscall_pre_setuid32(...) +#define __sanitizer_syscall_pre_setxattr(...) +#define __sanitizer_syscall_pre_sgetmask(...) +#define __sanitizer_syscall_pre_shmat(...) +#define __sanitizer_syscall_pre_shmctl(...) +#define __sanitizer_syscall_pre_shmdt(...) +#define __sanitizer_syscall_pre_shmget(...) +#define __sanitizer_syscall_pre_shutdown(...) +#define __sanitizer_syscall_pre_sigaction(...) +#define __sanitizer_syscall_pre_sigaltstack(...) +#define __sanitizer_syscall_pre_signal(...) +#define __sanitizer_syscall_pre_signalfd(...) +#define __sanitizer_syscall_pre_signalfd4(...) +#define __sanitizer_syscall_pre_sigpending(...) +#define __sanitizer_syscall_pre_sigprocmask(...) +#define __sanitizer_syscall_pre_sigreturn(...) +#define __sanitizer_syscall_pre_sigsuspend(...) +#define __sanitizer_syscall_pre_socket(...) +#define __sanitizer_syscall_pre_socketcall(...) +#define __sanitizer_syscall_pre_socketpair(...) 
+#define __sanitizer_syscall_pre_splice(...) +#define __sanitizer_syscall_pre_ssetmask(...) +#define __sanitizer_syscall_pre_stat(...) +#define __sanitizer_syscall_pre_stat64(...) +#define __sanitizer_syscall_pre_statfs(...) +#define __sanitizer_syscall_pre_statfs64(...) +#define __sanitizer_syscall_pre_stime(...) +#define __sanitizer_syscall_pre_stty(...) +#define __sanitizer_syscall_pre_swapoff(...) +#define __sanitizer_syscall_pre_swapon(...) +#define __sanitizer_syscall_pre_symlink(...) +#define __sanitizer_syscall_pre_symlinkat(...) +#define __sanitizer_syscall_pre_sync(...) +#define __sanitizer_syscall_pre_sync_file_range(...) +#define __sanitizer_syscall_pre_syncfs(...) +#define __sanitizer_syscall_pre__sysctl(...) +#define __sanitizer_syscall_pre_sysfs(...) +#define __sanitizer_syscall_pre_sysinfo(...) +#define __sanitizer_syscall_pre_syslog(...) +#define __sanitizer_syscall_pre_tee(...) +#define __sanitizer_syscall_pre_tgkill(...) +#define __sanitizer_syscall_pre_time(...) +#define __sanitizer_syscall_pre_timer_create(...) +#define __sanitizer_syscall_pre_timer_delete(...) +#define __sanitizer_syscall_pre_timerfd_create(...) +#define __sanitizer_syscall_pre_timerfd_gettime(...) +#define __sanitizer_syscall_pre_timerfd_settime(...) +#define __sanitizer_syscall_pre_timer_getoverrun(...) +#define __sanitizer_syscall_pre_timer_gettime(...) +#define __sanitizer_syscall_pre_timer_settime(...) +#define __sanitizer_syscall_pre_times(...) +#define __sanitizer_syscall_pre_tkill(...) +#define __sanitizer_syscall_pre_truncate(...) +#define __sanitizer_syscall_pre_truncate64(...) +#define __sanitizer_syscall_pre_tuxcall(...) +#define __sanitizer_syscall_pre_ugetrlimit(...) +#define __sanitizer_syscall_pre_ulimit(...) +#define __sanitizer_syscall_pre_umask(...) +#define __sanitizer_syscall_pre_umount(...) +#define __sanitizer_syscall_pre_umount2(...) +#define __sanitizer_syscall_pre_uname(...) +#define __sanitizer_syscall_pre_unlink(...) +#define __sanitizer_syscall_pre_unlinkat(...) +#define __sanitizer_syscall_pre_unshare(...) +#define __sanitizer_syscall_pre_uselib(...) +#define __sanitizer_syscall_pre_ustat(...) +#define __sanitizer_syscall_pre_utime(...) +#define __sanitizer_syscall_pre_utimensat(...) +#define __sanitizer_syscall_pre_utimes(...) +#define __sanitizer_syscall_pre_vfork(...) +#define __sanitizer_syscall_pre_vhangup(...) +#define __sanitizer_syscall_pre_vm86(...) +#define __sanitizer_syscall_pre_vm86old(...) +#define __sanitizer_syscall_pre_vmsplice(...) +#define __sanitizer_syscall_pre_vserver(...) +#define __sanitizer_syscall_pre_waitid(...) +#define __sanitizer_syscall_pre_write(...) +#define __sanitizer_syscall_pre_writev(...) + +#define __sanitizer_syscall_post_accept4(res, ...) +#define __sanitizer_syscall_post_accept(res, ...) +#define __sanitizer_syscall_post_access(res, ...) +#define __sanitizer_syscall_post_acct(res, ...) +#define __sanitizer_syscall_post_add_key(res, ...) +#define __sanitizer_syscall_post_adjtimex(res, ...) +#define __sanitizer_syscall_post_afs_syscall(res, ...) +#define __sanitizer_syscall_post_alarm(res, ...) +#define __sanitizer_syscall_post_arch_prctl(res, ...) +#define __sanitizer_syscall_post_bdflush(res, ...) +#define __sanitizer_syscall_post_bind(res, ...) +#define __sanitizer_syscall_post_break(res, ...) +#define __sanitizer_syscall_post_brk(res, ...) +#define __sanitizer_syscall_post_capget(res, ...) +#define __sanitizer_syscall_post_capset(res, ...) +#define __sanitizer_syscall_post_chdir(res, ...) 
+#define __sanitizer_syscall_post_chmod(res, ...) +#define __sanitizer_syscall_post_chown32(res, ...) +#define __sanitizer_syscall_post_chown(res, ...) +#define __sanitizer_syscall_post_chroot(res, ...) +#define __sanitizer_syscall_post_clock_adjtime(res, ...) +#define __sanitizer_syscall_post_clock_getres(res, ...) +#define __sanitizer_syscall_post_clock_gettime(res, ...) +#define __sanitizer_syscall_post_clock_nanosleep(res, ...) +#define __sanitizer_syscall_post_clock_settime(res, ...) +#define __sanitizer_syscall_post_clone(res, ...) +#define __sanitizer_syscall_post_close(res, ...) +#define __sanitizer_syscall_post_connect(res, ...) +#define __sanitizer_syscall_post_create_module(res, ...) +#define __sanitizer_syscall_post_creat(res, ...) +#define __sanitizer_syscall_post_delete_module(res, ...) +#define __sanitizer_syscall_post_dup2(res, ...) +#define __sanitizer_syscall_post_dup3(res, ...) +#define __sanitizer_syscall_post_dup(res, ...) +#define __sanitizer_syscall_post_epoll_create1(res, ...) +#define __sanitizer_syscall_post_epoll_create(res, ...) +#define __sanitizer_syscall_post_epoll_ctl_old(res, ...) +#define __sanitizer_syscall_post_epoll_ctl(res, ...) +#define __sanitizer_syscall_post_epoll_pwait(res, ...) +#define __sanitizer_syscall_post_epoll_wait_old(res, ...) +#define __sanitizer_syscall_post_epoll_wait(res, ...) +#define __sanitizer_syscall_post_eventfd2(res, ...) +#define __sanitizer_syscall_post_eventfd(res, ...) +#define __sanitizer_syscall_post_execve(res, ...) +#define __sanitizer_syscall_post_exit_group(res, ...) +#define __sanitizer_syscall_post_exit(res, ...) +#define __sanitizer_syscall_post_faccessat(res, ...) +#define __sanitizer_syscall_post_fadvise64_64(res, ...) +#define __sanitizer_syscall_post_fadvise64(res, ...) +#define __sanitizer_syscall_post_fallocate(res, ...) +#define __sanitizer_syscall_post_fanotify_init(res, ...) +#define __sanitizer_syscall_post_fanotify_mark(res, ...) +#define __sanitizer_syscall_post_fchdir(res, ...) +#define __sanitizer_syscall_post_fchmodat(res, ...) +#define __sanitizer_syscall_post_fchmod(res, ...) +#define __sanitizer_syscall_post_fchown32(res, ...) +#define __sanitizer_syscall_post_fchownat(res, ...) +#define __sanitizer_syscall_post_fchown(res, ...) +#define __sanitizer_syscall_post_fcntl64(res, ...) +#define __sanitizer_syscall_post_fcntl(res, ...) +#define __sanitizer_syscall_post_fdatasync(res, ...) +#define __sanitizer_syscall_post_fgetxattr(res, ...) +#define __sanitizer_syscall_post_flistxattr(res, ...) +#define __sanitizer_syscall_post_flock(res, ...) +#define __sanitizer_syscall_post_fork(res, ...) +#define __sanitizer_syscall_post_fremovexattr(res, ...) +#define __sanitizer_syscall_post_fsetxattr(res, ...) +#define __sanitizer_syscall_post_fstat64(res, ...) +#define __sanitizer_syscall_post_fstatat64(res, ...) +#define __sanitizer_syscall_post_fstatfs64(res, ...) +#define __sanitizer_syscall_post_fstatfs(res, ...) +#define __sanitizer_syscall_post_fstat(res, ...) +#define __sanitizer_syscall_post_fsync(res, ...) +#define __sanitizer_syscall_post_ftime(res, ...) +#define __sanitizer_syscall_post_ftruncate64(res, ...) +#define __sanitizer_syscall_post_ftruncate(res, ...) +#define __sanitizer_syscall_post_futex(res, ...) +#define __sanitizer_syscall_post_futimesat(res, ...) +#define __sanitizer_syscall_post_getcpu(res, ...) +#define __sanitizer_syscall_post_getcwd(res, ...) +#define __sanitizer_syscall_post_getegid32(res, ...) +#define __sanitizer_syscall_post_getegid(res, ...) 
+#define __sanitizer_syscall_post_geteuid32(res, ...) +#define __sanitizer_syscall_post_geteuid(res, ...) +#define __sanitizer_syscall_post_getgid32(res, ...) +#define __sanitizer_syscall_post_getgid(res, ...) +#define __sanitizer_syscall_post_getgroups32(res, ...) +#define __sanitizer_syscall_post_getgroups(res, ...) +#define __sanitizer_syscall_post_getitimer(res, ...) +#define __sanitizer_syscall_post_get_kernel_syms(res, ...) +#define __sanitizer_syscall_post_get_mempolicy(res, ...) +#define __sanitizer_syscall_post_getpeername(res, ...) +#define __sanitizer_syscall_post_getpgid(res, ...) +#define __sanitizer_syscall_post_getpgrp(res, ...) +#define __sanitizer_syscall_post_getpid(res, ...) +#define __sanitizer_syscall_post_getpmsg(res, ...) +#define __sanitizer_syscall_post_getppid(res, ...) +#define __sanitizer_syscall_post_getpriority(res, ...) +#define __sanitizer_syscall_post_getresgid32(res, ...) +#define __sanitizer_syscall_post_getresgid(res, ...) +#define __sanitizer_syscall_post_getresuid32(res, ...) +#define __sanitizer_syscall_post_getresuid(res, ...) +#define __sanitizer_syscall_post_getrlimit(res, ...) +#define __sanitizer_syscall_post_get_robust_list(res, ...) +#define __sanitizer_syscall_post_getrusage(res, ...) +#define __sanitizer_syscall_post_getsid(res, ...) +#define __sanitizer_syscall_post_getsockname(res, ...) +#define __sanitizer_syscall_post_getsockopt(res, ...) +#define __sanitizer_syscall_post_get_thread_area(res, ...) +#define __sanitizer_syscall_post_gettid(res, ...) +#define __sanitizer_syscall_post_gettimeofday(res, ...) +#define __sanitizer_syscall_post_getuid32(res, ...) +#define __sanitizer_syscall_post_getuid(res, ...) +#define __sanitizer_syscall_post_getxattr(res, ...) +#define __sanitizer_syscall_post_gtty(res, ...) +#define __sanitizer_syscall_post_idle(res, ...) +#define __sanitizer_syscall_post_init_module(res, ...) +#define __sanitizer_syscall_post_inotify_add_watch(res, ...) +#define __sanitizer_syscall_post_inotify_init1(res, ...) +#define __sanitizer_syscall_post_inotify_init(res, ...) +#define __sanitizer_syscall_post_inotify_rm_watch(res, ...) +#define __sanitizer_syscall_post_io_cancel(res, ...) +#define __sanitizer_syscall_post_ioctl(res, ...) +#define __sanitizer_syscall_post_io_destroy(res, ...) +#define __sanitizer_syscall_post_io_getevents(res, ...) +#define __sanitizer_syscall_post_ioperm(res, ...) +#define __sanitizer_syscall_post_iopl(res, ...) +#define __sanitizer_syscall_post_ioprio_get(res, ...) +#define __sanitizer_syscall_post_ioprio_set(res, ...) +#define __sanitizer_syscall_post_io_setup(res, ...) +#define __sanitizer_syscall_post_io_submit(res, ...) +#define __sanitizer_syscall_post_ipc(res, ...) +#define __sanitizer_syscall_post_kexec_load(res, ...) +#define __sanitizer_syscall_post_keyctl(res, ...) +#define __sanitizer_syscall_post_kill(res, ...) +#define __sanitizer_syscall_post_lchown32(res, ...) +#define __sanitizer_syscall_post_lchown(res, ...) +#define __sanitizer_syscall_post_lgetxattr(res, ...) +#define __sanitizer_syscall_post_linkat(res, ...) +#define __sanitizer_syscall_post_link(res, ...) +#define __sanitizer_syscall_post_listen(res, ...) +#define __sanitizer_syscall_post_listxattr(res, ...) +#define __sanitizer_syscall_post_llistxattr(res, ...) +#define __sanitizer_syscall_post__llseek(res, ...) +#define __sanitizer_syscall_post_lock(res, ...) +#define __sanitizer_syscall_post_lookup_dcookie(res, ...) +#define __sanitizer_syscall_post_lremovexattr(res, ...) +#define __sanitizer_syscall_post_lseek(res, ...) 
+#define __sanitizer_syscall_post_lsetxattr(res, ...) +#define __sanitizer_syscall_post_lstat64(res, ...) +#define __sanitizer_syscall_post_lstat(res, ...) +#define __sanitizer_syscall_post_madvise1(res, ...) +#define __sanitizer_syscall_post_madvise(res, ...) +#define __sanitizer_syscall_post_mbind(res, ...) +#define __sanitizer_syscall_post_migrate_pages(res, ...) +#define __sanitizer_syscall_post_mincore(res, ...) +#define __sanitizer_syscall_post_mkdirat(res, ...) +#define __sanitizer_syscall_post_mkdir(res, ...) +#define __sanitizer_syscall_post_mknodat(res, ...) +#define __sanitizer_syscall_post_mknod(res, ...) +#define __sanitizer_syscall_post_mlockall(res, ...) +#define __sanitizer_syscall_post_mlock(res, ...) +#define __sanitizer_syscall_post_mmap2(res, ...) +#define __sanitizer_syscall_post_mmap(res, ...) +#define __sanitizer_syscall_post_modify_ldt(res, ...) +#define __sanitizer_syscall_post_mount(res, ...) +#define __sanitizer_syscall_post_move_pages(res, ...) +#define __sanitizer_syscall_post_mprotect(res, ...) +#define __sanitizer_syscall_post_mpx(res, ...) +#define __sanitizer_syscall_post_mq_getsetattr(res, ...) +#define __sanitizer_syscall_post_mq_notify(res, ...) +#define __sanitizer_syscall_post_mq_open(res, ...) +#define __sanitizer_syscall_post_mq_timedreceive(res, ...) +#define __sanitizer_syscall_post_mq_timedsend(res, ...) +#define __sanitizer_syscall_post_mq_unlink(res, ...) +#define __sanitizer_syscall_post_mremap(res, ...) +#define __sanitizer_syscall_post_msgctl(res, ...) +#define __sanitizer_syscall_post_msgget(res, ...) +#define __sanitizer_syscall_post_msgrcv(res, ...) +#define __sanitizer_syscall_post_msgsnd(res, ...) +#define __sanitizer_syscall_post_msync(res, ...) +#define __sanitizer_syscall_post_munlockall(res, ...) +#define __sanitizer_syscall_post_munlock(res, ...) +#define __sanitizer_syscall_post_munmap(res, ...) +#define __sanitizer_syscall_post_name_to_handle_at(res, ...) +#define __sanitizer_syscall_post_nanosleep(res, ...) +#define __sanitizer_syscall_post_newfstatat(res, ...) +#define __sanitizer_syscall_post__newselect(res, ...) +#define __sanitizer_syscall_post_nfsservctl(res, ...) +#define __sanitizer_syscall_post_nice(res, ...) +#define __sanitizer_syscall_post_oldfstat(res, ...) +#define __sanitizer_syscall_post_oldlstat(res, ...) +#define __sanitizer_syscall_post_oldolduname(res, ...) +#define __sanitizer_syscall_post_oldstat(res, ...) +#define __sanitizer_syscall_post_olduname(res, ...) +#define __sanitizer_syscall_post_openat(res, ...) +#define __sanitizer_syscall_post_open_by_handle_at(res, ...) +#define __sanitizer_syscall_post_open(res, ...) +#define __sanitizer_syscall_post_pause(res, ...) +#define __sanitizer_syscall_post_perf_event_open(res, ...) +#define __sanitizer_syscall_post_personality(res, ...) +#define __sanitizer_syscall_post_pipe2(res, ...) +#define __sanitizer_syscall_post_pipe(res, ...) +#define __sanitizer_syscall_post_pivot_root(res, ...) +#define __sanitizer_syscall_post_poll(res, ...) +#define __sanitizer_syscall_post_ppoll(res, ...) +#define __sanitizer_syscall_post_prctl(res, ...) +#define __sanitizer_syscall_post_pread64(res, ...) +#define __sanitizer_syscall_post_preadv(res, ...) +#define __sanitizer_syscall_post_prlimit64(res, ...) +#define __sanitizer_syscall_post_process_vm_readv(res, ...) +#define __sanitizer_syscall_post_process_vm_writev(res, ...) +#define __sanitizer_syscall_post_profil(res, ...) +#define __sanitizer_syscall_post_prof(res, ...) +#define __sanitizer_syscall_post_pselect6(res, ...) 
+#define __sanitizer_syscall_post_ptrace(res, ...) +#define __sanitizer_syscall_post_putpmsg(res, ...) +#define __sanitizer_syscall_post_pwrite64(res, ...) +#define __sanitizer_syscall_post_pwritev(res, ...) +#define __sanitizer_syscall_post_query_module(res, ...) +#define __sanitizer_syscall_post_quotactl(res, ...) +#define __sanitizer_syscall_post_readahead(res, ...) +#define __sanitizer_syscall_post_readdir(res, ...) +#define __sanitizer_syscall_post_readlinkat(res, ...) +#define __sanitizer_syscall_post_readlink(res, ...) +#define __sanitizer_syscall_post_read(res, ...) +#define __sanitizer_syscall_post_readv(res, ...) +#define __sanitizer_syscall_post_reboot(res, ...) +#define __sanitizer_syscall_post_recvfrom(res, ...) +#define __sanitizer_syscall_post_recvmmsg(res, ...) +#define __sanitizer_syscall_post_remap_file_pages(res, ...) +#define __sanitizer_syscall_post_removexattr(res, ...) +#define __sanitizer_syscall_post_renameat(res, ...) +#define __sanitizer_syscall_post_rename(res, ...) +#define __sanitizer_syscall_post_request_key(res, ...) +#define __sanitizer_syscall_post_restart_syscall(res, ...) +#define __sanitizer_syscall_post_rmdir(res, ...) +#define __sanitizer_syscall_post_rt_sigaction(res, ...) +#define __sanitizer_syscall_post_rt_sigprocmask(res, ...) +#define __sanitizer_syscall_post_rt_sigqueueinfo(res, ...) +#define __sanitizer_syscall_post_rt_sigreturn(res, ...) +#define __sanitizer_syscall_post_rt_sigsuspend(res, ...) +#define __sanitizer_syscall_post_rt_sigtimedwait(res, ...) +#define __sanitizer_syscall_post_rt_tgsigqueueinfo(res, ...) +#define __sanitizer_syscall_post_sched_getaffinity(res, ...) +#define __sanitizer_syscall_post_sched_getparam(res, ...) +#define __sanitizer_syscall_post_sched_get_priority_max(res, ...) +#define __sanitizer_syscall_post_sched_get_priority_min(res, ...) +#define __sanitizer_syscall_post_sched_getscheduler(res, ...) +#define __sanitizer_syscall_post_sched_rr_get_interval(res, ...) +#define __sanitizer_syscall_post_sched_setaffinity(res, ...) +#define __sanitizer_syscall_post_sched_setparam(res, ...) +#define __sanitizer_syscall_post_sched_setscheduler(res, ...) +#define __sanitizer_syscall_post_sched_yield(res, ...) +#define __sanitizer_syscall_post_security(res, ...) +#define __sanitizer_syscall_post_select(res, ...) +#define __sanitizer_syscall_post_semctl(res, ...) +#define __sanitizer_syscall_post_semget(res, ...) +#define __sanitizer_syscall_post_semop(res, ...) +#define __sanitizer_syscall_post_semtimedop(res, ...) +#define __sanitizer_syscall_post_sendfile64(res, ...) +#define __sanitizer_syscall_post_sendfile(res, ...) +#define __sanitizer_syscall_post_sendmmsg(res, ...) +#define __sanitizer_syscall_post_sendmsg(res, ...) +#define __sanitizer_syscall_post_sendto(res, ...) +#define __sanitizer_syscall_post_setdomainname(res, ...) +#define __sanitizer_syscall_post_setfsgid32(res, ...) +#define __sanitizer_syscall_post_setfsgid(res, ...) +#define __sanitizer_syscall_post_setfsuid32(res, ...) +#define __sanitizer_syscall_post_setfsuid(res, ...) +#define __sanitizer_syscall_post_setgid32(res, ...) +#define __sanitizer_syscall_post_setgid(res, ...) +#define __sanitizer_syscall_post_setgroups32(res, ...) +#define __sanitizer_syscall_post_setgroups(res, ...) +#define __sanitizer_syscall_post_sethostname(res, ...) +#define __sanitizer_syscall_post_setitimer(res, ...) +#define __sanitizer_syscall_post_set_mempolicy(res, ...) +#define __sanitizer_syscall_post_setns(res, ...) +#define __sanitizer_syscall_post_setpgid(res, ...) 
+#define __sanitizer_syscall_post_setpriority(res, ...) +#define __sanitizer_syscall_post_setregid32(res, ...) +#define __sanitizer_syscall_post_setregid(res, ...) +#define __sanitizer_syscall_post_setresgid32(res, ...) +#define __sanitizer_syscall_post_setresgid(res, ...) +#define __sanitizer_syscall_post_setresuid32(res, ...) +#define __sanitizer_syscall_post_setresuid(res, ...) +#define __sanitizer_syscall_post_setreuid32(res, ...) +#define __sanitizer_syscall_post_setreuid(res, ...) +#define __sanitizer_syscall_post_setrlimit(res, ...) +#define __sanitizer_syscall_post_set_robust_list(res, ...) +#define __sanitizer_syscall_post_setsid(res, ...) +#define __sanitizer_syscall_post_setsockopt(res, ...) +#define __sanitizer_syscall_post_set_thread_area(res, ...) +#define __sanitizer_syscall_post_set_tid_address(res, ...) +#define __sanitizer_syscall_post_settimeofday(res, ...) +#define __sanitizer_syscall_post_setuid32(res, ...) +#define __sanitizer_syscall_post_setuid(res, ...) +#define __sanitizer_syscall_post_setxattr(res, ...) +#define __sanitizer_syscall_post_sgetmask(res, ...) +#define __sanitizer_syscall_post_shmat(res, ...) +#define __sanitizer_syscall_post_shmctl(res, ...) +#define __sanitizer_syscall_post_shmdt(res, ...) +#define __sanitizer_syscall_post_shmget(res, ...) +#define __sanitizer_syscall_post_shutdown(res, ...) +#define __sanitizer_syscall_post_sigaction(res, ...) +#define __sanitizer_syscall_post_sigaltstack(res, ...) +#define __sanitizer_syscall_post_signalfd4(res, ...) +#define __sanitizer_syscall_post_signalfd(res, ...) +#define __sanitizer_syscall_post_signal(res, ...) +#define __sanitizer_syscall_post_sigpending(res, ...) +#define __sanitizer_syscall_post_sigprocmask(res, ...) +#define __sanitizer_syscall_post_sigreturn(res, ...) +#define __sanitizer_syscall_post_sigsuspend(res, ...) +#define __sanitizer_syscall_post_socketcall(res, ...) +#define __sanitizer_syscall_post_socketpair(res, ...) +#define __sanitizer_syscall_post_socket(res, ...) +#define __sanitizer_syscall_post_splice(res, ...) +#define __sanitizer_syscall_post_ssetmask(res, ...) +#define __sanitizer_syscall_post_stat64(res, ...) +#define __sanitizer_syscall_post_statfs64(res, ...) +#define __sanitizer_syscall_post_statfs(res, ...) +#define __sanitizer_syscall_post_stat(res, ...) +#define __sanitizer_syscall_post_stime(res, ...) +#define __sanitizer_syscall_post_stty(res, ...) +#define __sanitizer_syscall_post_swapoff(res, ...) +#define __sanitizer_syscall_post_swapon(res, ...) +#define __sanitizer_syscall_post_symlinkat(res, ...) +#define __sanitizer_syscall_post_symlink(res, ...) +#define __sanitizer_syscall_post_sync_file_range(res, ...) +#define __sanitizer_syscall_post_syncfs(res, ...) +#define __sanitizer_syscall_post_sync(res, ...) +#define __sanitizer_syscall_post__sysctl(res, ...) +#define __sanitizer_syscall_post_sysfs(res, ...) +#define __sanitizer_syscall_post_sysinfo(res, ...) +#define __sanitizer_syscall_post_syslog(res, ...) +#define __sanitizer_syscall_post_tee(res, ...) +#define __sanitizer_syscall_post_tgkill(res, ...) +#define __sanitizer_syscall_post_timer_create(res, ...) +#define __sanitizer_syscall_post_timer_delete(res, ...) +#define __sanitizer_syscall_post_time(res, ...) +#define __sanitizer_syscall_post_timerfd_create(res, ...) +#define __sanitizer_syscall_post_timerfd_gettime(res, ...) +#define __sanitizer_syscall_post_timerfd_settime(res, ...) +#define __sanitizer_syscall_post_timer_getoverrun(res, ...) +#define __sanitizer_syscall_post_timer_gettime(res, ...) 
+#define __sanitizer_syscall_post_timer_settime(res, ...) +#define __sanitizer_syscall_post_times(res, ...) +#define __sanitizer_syscall_post_tkill(res, ...) +#define __sanitizer_syscall_post_truncate64(res, ...) +#define __sanitizer_syscall_post_truncate(res, ...) +#define __sanitizer_syscall_post_tuxcall(res, ...) +#define __sanitizer_syscall_post_ugetrlimit(res, ...) +#define __sanitizer_syscall_post_ulimit(res, ...) +#define __sanitizer_syscall_post_umask(res, ...) +#define __sanitizer_syscall_post_umount2(res, ...) +#define __sanitizer_syscall_post_umount(res, ...) +#define __sanitizer_syscall_post_uname(res, ...) +#define __sanitizer_syscall_post_unlinkat(res, ...) +#define __sanitizer_syscall_post_unlink(res, ...) +#define __sanitizer_syscall_post_unshare(res, ...) +#define __sanitizer_syscall_post_uselib(res, ...) +#define __sanitizer_syscall_post_ustat(res, ...) +#define __sanitizer_syscall_post_utimensat(res, ...) +#define __sanitizer_syscall_post_utime(res, ...) +#define __sanitizer_syscall_post_utimes(res, ...) +#define __sanitizer_syscall_post_vfork(res, ...) +#define __sanitizer_syscall_post_vhangup(res, ...) +#define __sanitizer_syscall_post_vm86old(res, ...) +#define __sanitizer_syscall_post_vm86(res, ...) +#define __sanitizer_syscall_post_vmsplice(res, ...) +#define __sanitizer_syscall_post_vserver(res, ...) +#define __sanitizer_syscall_post_waitid(res, ...) +#define __sanitizer_syscall_post_write(res, ...) +#define __sanitizer_syscall_post_writev(res, ...) + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // SANITIZER_LINUX_SYSCALL_HOOKS_H diff --git a/include/sanitizer/msan_interface.h b/include/sanitizer/msan_interface.h index 1a76dd60599f..9eff7b597b69 100644 --- a/include/sanitizer/msan_interface.h +++ b/include/sanitizer/msan_interface.h @@ -16,106 +16,87 @@ #include <sanitizer/common_interface_defs.h> -using __sanitizer::uptr; -using __sanitizer::sptr; -using __sanitizer::u32; - #ifdef __cplusplus extern "C" { #endif -// FIXME: document all interface functions. - -SANITIZER_INTERFACE_ATTRIBUTE -int __msan_get_track_origins(); - -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_init(); - -// Print a warning and maybe return. -// This function can die based on flags()->exit_code. -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_warning(); - -// Print a warning and die. -// Intrumentation inserts calls to this function when building in "fast" mode -// (i.e. -mllvm -msan-keep-going) -SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn)) -void __msan_warning_noreturn(); - -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_unpoison(void *a, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_clear_and_unpoison(void *a, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void* __msan_memcpy(void *dst, const void *src, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void* __msan_memset(void *s, int c, uptr n); -SANITIZER_INTERFACE_ATTRIBUTE -void* __msan_memmove(void* dest, const void* src, uptr n); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_copy_poison(void *dst, const void *src, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_copy_origin(void *dst, const void *src, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_move_poison(void *dst, const void *src, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_poison(void *a, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_poison_stack(void *a, uptr size); - -// Copy size bytes from src to dst and unpoison the result. -// Useful to implement unsafe loads. 
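The __sanitizer_syscall_pre_* and __sanitizer_syscall_post_* macros above are meant to be invoked by code that enters the kernel directly, bypassing libc, so a sanitizer runtime can account for the memory a syscall reads or writes; in this revision they all expand to nothing. A minimal usage sketch, not part of the patch (the wrapper name checked_read is hypothetical):

#include <sanitizer/linux_syscall_hooks.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Bracket a raw read(2) with the hook macros declared above.  Because the
   hooks are currently no-op macros, this compiles down to the bare syscall. */
static long checked_read(int fd, void *buf, size_t count) {
  __sanitizer_syscall_pre_read(fd, buf, count);        /* before entering the kernel */
  long res = syscall(SYS_read, fd, buf, count);        /* raw syscall, not the libc wrapper */
  __sanitizer_syscall_post_read(res, fd, buf, count);  /* after the kernel returns */
  return res;
}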
-SANITIZER_INTERFACE_ATTRIBUTE -void __msan_load_unpoisoned(void *src, uptr size, void *dst); - -// Returns the offset of the first (at least partially) poisoned byte, -// or -1 if the whole range is good. -SANITIZER_INTERFACE_ATTRIBUTE -sptr __msan_test_shadow(const void *x, uptr size); - -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_set_origin(void *a, uptr size, u32 origin); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_set_alloca_origin(void *a, uptr size, const char *descr); -SANITIZER_INTERFACE_ATTRIBUTE -u32 __msan_get_origin(void *a); - -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_clear_on_return(); - -// Default: -1 (don't exit on error). -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_set_exit_code(int exit_code); - -SANITIZER_INTERFACE_ATTRIBUTE -int __msan_set_poison_in_malloc(int do_poison); - -// For testing. -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_set_expect_umr(int expect_umr); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_break_optimization(void *x); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_print_shadow(const void *x, uptr size); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_print_param_shadow(); -SANITIZER_INTERFACE_ATTRIBUTE -int __msan_has_dynamic_component(); - -// Returns x such that %fs:x is the first byte of __msan_retval_tls. -SANITIZER_INTERFACE_ATTRIBUTE -int __msan_get_retval_tls_offset(); -SANITIZER_INTERFACE_ATTRIBUTE -int __msan_get_param_tls_offset(); - -// For testing. -SANITIZER_INTERFACE_ATTRIBUTE -u32 __msan_get_origin_tls(); -SANITIZER_INTERFACE_ATTRIBUTE -const char *__msan_get_origin_descr_if_stack(u32 id); -SANITIZER_INTERFACE_ATTRIBUTE -void __msan_partial_poison(void* data, void* shadow, uptr size); +#if __has_feature(memory_sanitizer) + /* Returns a string describing a stack origin. + Return NULL if the origin is invalid, or is not a stack origin. */ + const char *__msan_get_origin_descr_if_stack(uint32_t id); + + + /* Set raw origin for the memory range. */ + void __msan_set_origin(const void *a, size_t size, uint32_t origin); + + /* Get raw origin for an address. */ + uint32_t __msan_get_origin(const void *a); + + /* Returns non-zero if tracking origins. */ + int __msan_get_track_origins(); + + /* Returns the origin id of the latest UMR in the calling thread. */ + uint32_t __msan_get_umr_origin(); + + /* Make memory region fully initialized (without changing its contents). */ + void __msan_unpoison(const void *a, size_t size); + + /* Make memory region fully uninitialized (without changing its contents). */ + void __msan_poison(const void *a, size_t size); + + /* Make memory region partially uninitialized (without changing its contents). + */ + void __msan_partial_poison(const void* data, void* shadow, size_t size); + + /* Returns the offset of the first (at least partially) poisoned byte in the + memory range, or -1 if the whole range is good. */ + intptr_t __msan_test_shadow(const void *x, size_t size); + + /* Set exit code when error(s) were detected. + Value of 0 means don't change the program exit code. */ + void __msan_set_exit_code(int exit_code); + + /* For testing: + __msan_set_expect_umr(1); + ... some buggy code ... + __msan_set_expect_umr(0); + The last line will verify that a UMR happened. */ + void __msan_set_expect_umr(int expect_umr); + + /* Print shadow and origin for the memory range to stdout in a human-readable + format. */ + void __msan_print_shadow(const void *x, size_t size); + + /* Print current function arguments shadow and origin to stdout in a + human-readable format. 
*/ + void __msan_print_param_shadow(); + + /* Returns true if running under a dynamic tool (DynamoRio-based). */ + int __msan_has_dynamic_component(); + + /* Tell MSan about newly allocated memory (ex.: custom allocator). + Memory will be marked uninitialized, with origin at the call site. */ + void __msan_allocated_memory(const void* data, size_t size); + +#else // __has_feature(memory_sanitizer) + +#define __msan_get_origin_descr_if_stack(id) ((const char*)0) +#define __msan_set_origin(a, size, origin) +#define __msan_get_origin(a) ((uint32_t)-1) +#define __msan_get_track_origins() (0) +#define __msan_get_umr_origin() ((uint32_t)-1) +#define __msan_unpoison(a, size) +#define __msan_poison(a, size) +#define __msan_partial_poison(data, shadow, size) +#define __msan_test_shadow(x, size) ((intptr_t)-1) +#define __msan_set_exit_code(exit_code) +#define __msan_set_expect_umr(expect_umr) +#define __msan_print_shadow(x, size) +#define __msan_print_param_shadow() +#define __msan_has_dynamic_component() (0) +#define __msan_allocated_memory(data, size) + +#endif // __has_feature(memory_sanitizer) #ifdef __cplusplus } // extern "C" diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index fa6d8abc65e6..f07ab1e1872b 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -7,6 +7,7 @@ if(CMAKE_SYSTEM_NAME MATCHES "Darwin|Linux") add_subdirectory(interception) add_subdirectory(sanitizer_common) if(NOT ANDROID) + add_subdirectory(profile) add_subdirectory(ubsan) endif() endif() @@ -14,10 +15,10 @@ if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux" AND NOT ANDROID) # ThreadSanitizer and MemorySanitizer are supported on Linux only. add_subdirectory(tsan) add_subdirectory(msan) + add_subdirectory(msandr) + add_subdirectory(lsan) endif() -# FIXME: Add support for the profile library. - # The top-level lib directory contains a large amount of C code which provides # generic implementations of the core runtime library along with optimized # architecture-specific code in various subdirectories. 
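Before the build-system hunks continue, here is a minimal sketch of how the rewritten msan_interface.h above is intended to be consumed. It assumes a Clang build (so __has_feature resolves) and a hypothetical pool allocator my_pool_alloc; under a non-MemorySanitizer build the calls reduce to the no-op and constant macros from the #else branch:

#include <sanitizer/msan_interface.h>
#include <stdint.h>
#include <stdio.h>

void *my_pool_alloc(size_t size);   /* hypothetical custom allocator, defined elsewhere */

/* Hand freshly carved-out pool memory to MSan: the bytes are marked
   uninitialized and the origin points at this call site. */
void *pool_alloc_traced(size_t size) {
  void *p = my_pool_alloc(size);
  __msan_allocated_memory(p, size);
  return p;
}

/* Report the first uninitialized byte in a buffer, if any.
   __msan_test_shadow returns -1 when the whole range is initialized. */
void report_uninit(const void *buf, size_t size) {
  intptr_t off = __msan_test_shadow(buf, size);
  if (off >= 0)
    fprintf(stderr, "first uninitialized byte at offset %ld\n", (long)off);
}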
@@ -36,6 +37,8 @@ set(GENERIC_SOURCES ashlti3.c ashrdi3.c ashrti3.c + # FIXME: atomic.c may only be compiled if host compiler understands _Atomic + # atomic.c clear_cache.c clzdi2.c clzsi2.c @@ -152,37 +155,36 @@ set(GENERIC_SOURCES umodti3.c ) -if(CAN_TARGET_x86_64) - add_library(clang_rt.x86_64 STATIC - x86_64/floatdidf.c - x86_64/floatdisf.c - x86_64/floatdixf.c - x86_64/floatundidf.S - x86_64/floatundisf.S - x86_64/floatundixf.S - ${GENERIC_SOURCES} - ) - set_target_properties(clang_rt.x86_64 PROPERTIES COMPILE_FLAGS "-std=c99 ${TARGET_x86_64_CFLAGS}") - add_clang_compiler_rt_libraries(clang_rt.x86_64) -endif() -if(CAN_TARGET_i386) - add_library(clang_rt.i386 STATIC - i386/ashldi3.S - i386/ashrdi3.S - i386/divdi3.S - i386/floatdidf.S - i386/floatdisf.S - i386/floatdixf.S - i386/floatundidf.S - i386/floatundisf.S - i386/floatundixf.S - i386/lshrdi3.S - i386/moddi3.S - i386/muldi3.S - i386/udivdi3.S - i386/umoddi3.S - ${GENERIC_SOURCES} - ) - set_target_properties(clang_rt.i386 PROPERTIES COMPILE_FLAGS "-std=c99 ${TARGET_i386_CFLAGS}") - add_clang_compiler_rt_libraries(clang_rt.i386) -endif() +set(x86_64_SOURCES + x86_64/floatdidf.c + x86_64/floatdisf.c + x86_64/floatdixf.c + x86_64/floatundidf.S + x86_64/floatundisf.S + x86_64/floatundixf.S + ${GENERIC_SOURCES}) + +set(i386_SOURCES + i386/ashldi3.S + i386/ashrdi3.S + i386/divdi3.S + i386/floatdidf.S + i386/floatdisf.S + i386/floatdixf.S + i386/floatundidf.S + i386/floatundisf.S + i386/floatundixf.S + i386/lshrdi3.S + i386/moddi3.S + i386/muldi3.S + i386/udivdi3.S + i386/umoddi3.S + ${GENERIC_SOURCES}) + +foreach(arch x86_64 i386) + if(CAN_TARGET_${arch}) + add_compiler_rt_static_runtime(clang_rt.${arch} ${arch} + SOURCES ${${arch}_SOURCES} + CFLAGS "-std=c99") + endif() +endforeach() diff --git a/lib/Makefile.mk b/lib/Makefile.mk index ea471e01b1e6..8054c35aa362 100644 --- a/lib/Makefile.mk +++ b/lib/Makefile.mk @@ -19,15 +19,12 @@ SubDirs += interception SubDirs += profile SubDirs += sanitizer_common SubDirs += tsan +SubDirs += msan SubDirs += ubsan - -# FIXME: We don't currently support building an atomic library, and as it must -# be a separate library from the runtime library, we need to remove its source -# code from the source files list. -ExcludedSources := atomic.c +SubDirs += lsan # Define the variables for this specific directory. 
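The atomic.c FIXME above hinges on the host compiler implementing C11 _Atomic. A rough sketch of the kind of translation-unit probe that distinguishes the two cases (illustrative only; it is not how this build system currently decides):

/* Compiles only when the host compiler provides C11 atomics; otherwise the
   preprocessor error fires and atomic.c would have to stay excluded. */
#if defined(__STDC_NO_ATOMICS__) || !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L
#error "no C11 _Atomic support: keep atomic.c out of the build"
#else
#include <stdatomic.h>

_Atomic int probe;                       /* the qualifier atomic.c relies on */

int main(void) {
  atomic_store(&probe, 1);
  return atomic_load(&probe) - 1;        /* 0 on success */
}
#endif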
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(filter-out $(ExcludedSources),$(notdir $(file)))) +Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file))) ObjNames := $(Sources:%.c=%.o) Implementation := Generic diff --git a/lib/apple_versioning.c b/lib/apple_versioning.c index e838d726fbb6..09f149f14cf7 100644 --- a/lib/apple_versioning.c +++ b/lib/apple_versioning.c @@ -13,6 +13,7 @@ #if __APPLE__ #if __arm__ #define NOT_HERE_BEFORE_10_6(sym) + #define NOT_HERE_IN_10_8_AND_EARLIER(sym) #elif __ppc__ #define NOT_HERE_BEFORE_10_6(sym) \ extern const char sym##_tmp3 __asm("$ld$hide$os10.3$_" #sym ); \ @@ -27,6 +28,13 @@ __attribute__((visibility("default"))) const char sym##_tmp4 = 0; \ extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym ); \ __attribute__((visibility("default"))) const char sym##_tmp5 = 0; + #define NOT_HERE_IN_10_8_AND_EARLIER(sym) \ + extern const char sym##_tmp8 __asm("$ld$hide$os10.8$_" #sym ); \ + __attribute__((visibility("default"))) const char sym##_tmp8 = 0; \ + extern const char sym##_tmp7 __asm("$ld$hide$os10.7$_" #sym ); \ + __attribute__((visibility("default"))) const char sym##_tmp7 = 0; \ + extern const char sym##_tmp6 __asm("$ld$hide$os10.6$_" #sym ); \ + __attribute__((visibility("default"))) const char sym##_tmp6 = 0; #endif /* __ppc__ */ @@ -143,6 +151,56 @@ NOT_HERE_BEFORE_10_6(__gcc_qsub) NOT_HERE_BEFORE_10_6(__trampoline_setup) #endif /* __ppc__ */ +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_8) + +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_1) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_2) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_4) +NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_8) + + #if __arm__ && __DYNAMIC__ #define 
NOT_HERE_UNTIL_AFTER_4_3(sym) \ extern const char sym##_tmp1 __asm("$ld$hide$os3.0$_" #sym ); \ diff --git a/lib/arm/aeabi_dcmp.S b/lib/arm/aeabi_dcmp.S new file mode 100644 index 000000000000..c4d07727a380 --- /dev/null +++ b/lib/arm/aeabi_dcmp.S @@ -0,0 +1,39 @@ +//===-- aeabi_dcmp.S - EABI dcmp* implementation ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "../assembly.h" + +// int __aeabi_dcmp{eq,lt,le,ge,gt}(double a, double b) { +// int result = __{eq,lt,le,ge,gt}df2(a, b); +// if (result {==,<,<=,>=,>} 0) { +// return 1; +// } else { +// return 0; +// } +// } + +#define DEFINE_AEABI_DCMP(cond) \ + .syntax unified SEPARATOR \ + .align 2 SEPARATOR \ +DEFINE_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond) \ + push { r4, lr } SEPARATOR \ + bl SYMBOL_NAME(__ ## cond ## df2) SEPARATOR \ + cmp r0, #0 SEPARATOR \ + b ## cond 1f SEPARATOR \ + mov r0, #0 SEPARATOR \ + pop { r4, pc } SEPARATOR \ +1: SEPARATOR \ + mov r0, #1 SEPARATOR \ + pop { r4, pc } + +DEFINE_AEABI_DCMP(eq) +DEFINE_AEABI_DCMP(lt) +DEFINE_AEABI_DCMP(le) +DEFINE_AEABI_DCMP(ge) +DEFINE_AEABI_DCMP(gt) diff --git a/lib/arm/aeabi_fcmp.S b/lib/arm/aeabi_fcmp.S new file mode 100644 index 000000000000..576a33f25542 --- /dev/null +++ b/lib/arm/aeabi_fcmp.S @@ -0,0 +1,39 @@ +//===-- aeabi_fcmp.S - EABI fcmp* implementation ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "../assembly.h" + +// int __aeabi_fcmp{eq,lt,le,ge,gt}(float a, float b) { +// int result = __{eq,lt,le,ge,gt}sf2(a, b); +// if (result {==,<,<=,>=,>} 0) { +// return 1; +// } else { +// return 0; +// } +// } + +#define DEFINE_AEABI_FCMP(cond) \ + .syntax unified SEPARATOR \ + .align 2 SEPARATOR \ +DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmp ## cond) \ + push { r4, lr } SEPARATOR \ + bl SYMBOL_NAME(__ ## cond ## sf2) SEPARATOR \ + cmp r0, #0 SEPARATOR \ + b ## cond 1f SEPARATOR \ + mov r0, #0 SEPARATOR \ + pop { r4, pc } SEPARATOR \ +1: SEPARATOR \ + mov r0, #1 SEPARATOR \ + pop { r4, pc } + +DEFINE_AEABI_FCMP(eq) +DEFINE_AEABI_FCMP(lt) +DEFINE_AEABI_FCMP(le) +DEFINE_AEABI_FCMP(ge) +DEFINE_AEABI_FCMP(gt) diff --git a/lib/arm/divmodsi4.S b/lib/arm/divmodsi4.S index cec39a7926f9..d31e510c8f38 100644 --- a/lib/arm/divmodsi4.S +++ b/lib/arm/divmodsi4.S @@ -24,6 +24,18 @@ .syntax unified .align 3 DEFINE_COMPILERRT_FUNCTION(__divmodsi4) +#if __ARM_ARCH_7S__ + tst r1, r1 + beq LOCAL_LABEL(divzero) + mov r3, r0 + sdiv r0, r3, r1 + mls r1, r0, r1, r3 + str r1, [r2] + bx lr +LOCAL_LABEL(divzero): + mov r0, #0 + bx lr +#else ESTABLISH_FRAME // Set aside the sign of the quotient and modulus, and the address for the // modulus. 
@@ -45,3 +57,4 @@ DEFINE_COMPILERRT_FUNCTION(__divmodsi4) sub r1, r1, r5, asr #31 str r1, [r6] CLEAR_FRAME_AND_RETURN +#endif diff --git a/lib/arm/modsi3.S b/lib/arm/modsi3.S index a4cd2ee54e7b..04595011d0ec 100644 --- a/lib/arm/modsi3.S +++ b/lib/arm/modsi3.S @@ -23,6 +23,16 @@ .syntax unified .align 3 DEFINE_COMPILERRT_FUNCTION(__modsi3) +#if __ARM_ARCH_7S__ + tst r1, r1 + beq LOCAL_LABEL(divzero) + sdiv r2, r0, r1 + mls r0, r2, r1, r0 + bx lr +LOCAL_LABEL(divzero): + mov r0, #0 + bx lr +#else ESTABLISH_FRAME // Set aside the sign of the dividend. mov r4, r0 @@ -37,3 +47,4 @@ DEFINE_COMPILERRT_FUNCTION(__modsi3) eor r0, r0, r4, asr #31 sub r0, r0, r4, asr #31 CLEAR_FRAME_AND_RETURN +#endif diff --git a/lib/arm/udivmodsi4.S b/lib/arm/udivmodsi4.S index d164a751d089..9956cd48442f 100644 --- a/lib/arm/udivmodsi4.S +++ b/lib/arm/udivmodsi4.S @@ -31,6 +31,18 @@ .syntax unified .align 3 DEFINE_COMPILERRT_FUNCTION(__udivmodsi4) +#if __ARM_ARCH_7S__ + tst r1, r1 + beq LOCAL_LABEL(divzero) + mov r3, r0 + udiv r0, r3, r1 + mls r1, r0, r1, r3 + str r1, [r2] + bx lr +LOCAL_LABEL(divzero): + mov r0, #0 + bx lr +#else // We use a simple digit by digit algorithm; before we get into the actual // divide loop, we must calculate the left-shift amount necessary to align // the MSB of the divisor with that of the dividend (If this shift is @@ -78,3 +90,4 @@ LOCAL_LABEL(return): str a, [r2] mov r0, q CLEAR_FRAME_AND_RETURN +#endif diff --git a/lib/arm/umodsi3.S b/lib/arm/umodsi3.S index 3a2ab2b87751..328e7054b857 100644 --- a/lib/arm/umodsi3.S +++ b/lib/arm/umodsi3.S @@ -23,6 +23,16 @@ .syntax unified .align 3 DEFINE_COMPILERRT_FUNCTION(__umodsi3) +#if __ARM_ARCH_7S__ + tst r1, r1 + beq LOCAL_LABEL(divzero) + udiv r2, r0, r1 + mls r0, r2, r1, r0 + bx lr +LOCAL_LABEL(divzero): + mov r0, #0 + bx lr +#else // We use a simple digit by digit algorithm; before we get into the actual // divide loop, we must calculate the left-shift amount necessary to align // the MSB of the divisor with that of the dividend. @@ -56,3 +66,4 @@ LOCAL_LABEL(mainLoop): subs r, a, b movhs a, r bx lr +#endif diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt index 92cba6dee622..a567a4d3e970 100644 --- a/lib/asan/CMakeLists.txt +++ b/lib/asan/CMakeLists.txt @@ -1,7 +1,6 @@ # Build for the AddressSanitizer runtime support library. set(ASAN_SOURCES - asan_allocator.cc asan_allocator2.cc asan_fake_stack.cc asan_globals.cc @@ -14,60 +13,58 @@ set(ASAN_SOURCES asan_new_delete.cc asan_poisoning.cc asan_posix.cc + asan_preinit.cc asan_report.cc asan_rtl.cc asan_stack.cc asan_stats.cc asan_thread.cc - asan_thread_registry.cc asan_win.cc ) set(ASAN_DYLIB_SOURCES ${ASAN_SOURCES} - dynamic/asan_interceptors_dynamic.cc ) include_directories(..) -set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS}) +set(ASAN_CFLAGS + ${SANITIZER_COMMON_CFLAGS} + -fno-rtti) + +set(ASAN_COMMON_DEFINITIONS + ASAN_HAS_EXCEPTIONS=1) if(ANDROID) - set(ASAN_COMMON_DEFINITIONS - ASAN_HAS_EXCEPTIONS=1 + list(APPEND ASAN_COMMON_DEFINITIONS ASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 ASAN_NEEDS_SEGV=0 - ASAN_LOW_MEMORY=1 - ) + ASAN_LOW_MEMORY=1) else() - set(ASAN_COMMON_DEFINITIONS - ASAN_HAS_EXCEPTIONS=1 - ASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 - ASAN_NEEDS_SEGV=1 - ) + list(APPEND ASAN_COMMON_DEFINITIONS + ASAN_FLEXIBLE_MAPPING_AND_OFFSET=1 + ASAN_NEEDS_SEGV=1) endif() -set(ASAN_DYLIB_DEFINITIONS - ${ASAN_COMMON_DEFINITIONS} - MAC_INTERPOSE_FUNCTIONS=1 - ) - # Architectures supported by ASan. 
filter_available_targets(ASAN_SUPPORTED_ARCH - x86_64 i386) + x86_64 i386 powerpc64 powerpc) set(ASAN_RUNTIME_LIBRARIES) if(APPLE) # Build universal binary on APPLE. - add_library(clang_rt.asan_osx STATIC - ${ASAN_SOURCES} - $<TARGET_OBJECTS:RTInterception.osx> - $<TARGET_OBJECTS:RTSanitizerCommon.osx> - ) - set_target_compile_flags(clang_rt.asan_osx ${ASAN_CFLAGS}) - set_target_properties(clang_rt.asan_osx PROPERTIES - OSX_ARCHITECTURES "${ASAN_SUPPORTED_ARCH}") - list(APPEND ASAN_RUNTIME_LIBRARIES clang_rt.asan_osx) + add_compiler_rt_osx_dynamic_runtime(clang_rt.asan_osx_dynamic + ARCH ${ASAN_SUPPORTED_ARCH} + SOURCES ${ASAN_DYLIB_SOURCES} + $<TARGET_OBJECTS:RTInterception.osx> + $<TARGET_OBJECTS:RTSanitizerCommon.osx> + CFLAGS ${ASAN_CFLAGS} + DEFS ${ASAN_COMMON_DEFINITIONS} + # Dynamic lookup is needed because shadow scale and offset are + # provided by the instrumented modules. + LINKFLAGS "-framework Foundation" + "-undefined dynamic_lookup") + list(APPEND ASAN_RUNTIME_LIBRARIES clang_rt.asan_osx_dynamic) elseif(ANDROID) add_library(clang_rt.asan-arm-android SHARED ${ASAN_SOURCES} @@ -75,44 +72,28 @@ elseif(ANDROID) $<TARGET_OBJECTS:RTSanitizerCommon.arm.android> ) set_target_compile_flags(clang_rt.asan-arm-android - ${ASAN_CFLAGS} - ) + ${ASAN_CFLAGS}) + set_property(TARGET clang_rt.asan-arm-android APPEND PROPERTY + COMPILE_DEFINITIONS ${ASAN_COMMON_DEFINITIONS}) target_link_libraries(clang_rt.asan-arm-android dl) list(APPEND ASAN_RUNTIME_LIBRARIES clang_rt.asan-arm-android) else() # Otherwise, build separate libraries for each target. foreach(arch ${ASAN_SUPPORTED_ARCH}) - add_library(clang_rt.asan-${arch} STATIC - ${ASAN_SOURCES} - $<TARGET_OBJECTS:RTInterception.${arch}> - $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>) - set_target_compile_flags(clang_rt.asan-${arch} - ${ASAN_CFLAGS} ${TARGET_${arch}_CFLAGS}) + add_compiler_rt_static_runtime(clang_rt.asan-${arch} ${arch} + SOURCES ${ASAN_SOURCES} + $<TARGET_OBJECTS:RTInterception.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTLSanCommon.${arch}> + CFLAGS ${ASAN_CFLAGS} + DEFS ${ASAN_COMMON_DEFINITIONS} + SYMS asan.syms) list(APPEND ASAN_RUNTIME_LIBRARIES clang_rt.asan-${arch}) endforeach() endif() -set_property(TARGET ${ASAN_RUNTIME_LIBRARIES} APPEND PROPERTY - COMPILE_DEFINITIONS ${ASAN_COMMON_DEFINITIONS}) -add_clang_compiler_rt_libraries(${ASAN_RUNTIME_LIBRARIES}) - -set(ASAN_DYNAMIC_RUNTIME_LIBRARIES) -if(APPLE) - # Build universal binary on APPLE. 
- add_library(clang_rt.asan_osx_dynamic SHARED - ${ASAN_DYLIB_SOURCES} - $<TARGET_OBJECTS:RTInterception.osx> - $<TARGET_OBJECTS:RTSanitizerCommon.osx> - ) - set_target_compile_flags(clang_rt.asan_osx_dynamic ${ASAN_CFLAGS}) - set_target_properties(clang_rt.asan_osx_dynamic PROPERTIES - COMPILE_DEFINITIONS "${ASAN_DYLIB_DEFINITIONS}" - OSX_ARCHITECTURES "${ASAN_SUPPORTED_ARCH}" - LINK_FLAGS "-framework Foundation") - list(APPEND ASAN_DYNAMIC_RUNTIME_LIBRARIES clang_rt.asan_osx_dynamic) -endif() -add_clang_compiler_rt_libraries(${ASAN_DYNAMIC_RUNTIME_LIBRARIES}) - +add_compiler_rt_resource_file(asan_blacklist asan_blacklist.txt) if(LLVM_INCLUDE_TESTS) add_subdirectory(tests) diff --git a/lib/asan/Makefile.mk b/lib/asan/Makefile.mk index af9602e8b242..97da64bec573 100644 --- a/lib/asan/Makefile.mk +++ b/lib/asan/Makefile.mk @@ -8,7 +8,7 @@ #===------------------------------------------------------------------------===# ModuleName := asan -SubDirs := dynamic +SubDirs := Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file))) ObjNames := $(Sources:%.cc=%.o) @@ -18,7 +18,6 @@ Implementation := Generic # FIXME: use automatic dependencies? Dependencies := $(wildcard $(Dir)/*.h) Dependencies += $(wildcard $(Dir)/../interception/*.h) -Dependencies += $(wildcard $(Dir)/../interception/mach_override/*.h) Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h) # Define a convenience variable for all the asan functions. diff --git a/lib/asan/asan.syms b/lib/asan/asan.syms new file mode 100644 index 000000000000..fce367314093 --- /dev/null +++ b/lib/asan/asan.syms @@ -0,0 +1,5 @@ +{ + __asan_*; + __sanitizer_syscall_pre_*; + __sanitizer_syscall_post_*; +}; diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc deleted file mode 100644 index 30dd4ceddd88..000000000000 --- a/lib/asan/asan_allocator.cc +++ /dev/null @@ -1,810 +0,0 @@ -//===-- asan_allocator.cc -------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Implementation of ASan's memory allocator. -// Evey piece of memory (AsanChunk) allocated by the allocator -// has a left redzone of REDZONE bytes and -// a right redzone such that the end of the chunk is aligned by REDZONE -// (i.e. the right redzone is between 0 and REDZONE-1). -// The left redzone is always poisoned. -// The right redzone is poisoned on malloc, the body is poisoned on free. -// Once freed, a chunk is moved to a quarantine (fifo list). -// After quarantine, a chunk is returned to freelists. -// -// The left redzone contains ASan's internal data and the stack trace of -// the malloc call. -// Once freed, the body of the chunk contains the stack trace of the free call. 
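As an aside, the chunk layout described in the file header comment above can be pictured with a little arithmetic. A sketch under the stated assumptions (REDZONE is a power of two; the constant and helper below are placeholders, not the deleted implementation):

#include <stdint.h>
#include <stdio.h>

#define REDZONE 128u   /* stands in for flags()->redzone; always a power of two */

static uintptr_t round_up(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

int main(void) {
  uintptr_t user_size = 100;
  /* left redzone (metadata + malloc stack trace) + user body, padded so the
     chunk end is REDZONE-aligned; the padding is the 0..REDZONE-1 byte right
     redzone that gets poisoned on malloc. */
  uintptr_t total    = round_up(REDZONE + user_size, REDZONE);
  uintptr_t right_rz = total - REDZONE - user_size;
  printf("chunk=%lu bytes, left redzone=%u, right redzone=%lu\n",
         (unsigned long)total, (unsigned)REDZONE, (unsigned long)right_rz);
  return 0;
}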
-// -//===----------------------------------------------------------------------===// -#include "asan_allocator.h" - -#if ASAN_ALLOCATOR_VERSION == 1 -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "asan_stats.h" -#include "asan_report.h" -#include "asan_thread.h" -#include "asan_thread_registry.h" -#include "sanitizer/asan_interface.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_mutex.h" - -namespace __asan { - -#define REDZONE ((uptr)(flags()->redzone)) -static const uptr kMinAllocSize = REDZONE * 2; -static const u64 kMaxAvailableRam = 128ULL << 30; // 128G -static const uptr kMaxThreadLocalQuarantine = 1 << 20; // 1M - -static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20; -static const uptr kMaxSizeForThreadLocalFreeList = - (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17; - -// Size classes less than kMallocSizeClassStep are powers of two. -// All other size classes are multiples of kMallocSizeClassStep. -static const uptr kMallocSizeClassStepLog = 26; -static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog; - -static const uptr kMaxAllowedMallocSize = - (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30; - -static inline uptr SizeClassToSize(u8 size_class) { - CHECK(size_class < kNumberOfSizeClasses); - if (size_class <= kMallocSizeClassStepLog) { - return 1UL << size_class; - } else { - return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep; - } -} - -static inline u8 SizeToSizeClass(uptr size) { - u8 res = 0; - if (size <= kMallocSizeClassStep) { - uptr rounded = RoundUpToPowerOfTwo(size); - res = Log2(rounded); - } else { - res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep) - + kMallocSizeClassStepLog; - } - CHECK(res < kNumberOfSizeClasses); - CHECK(size <= SizeClassToSize(res)); - return res; -} - -// Given REDZONE bytes, we need to mark first size bytes -// as addressable and the rest REDZONE-size bytes as unaddressable. -static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) { - CHECK(size <= REDZONE); - CHECK(IsAligned(mem, REDZONE)); - CHECK(IsPowerOfTwo(SHADOW_GRANULARITY)); - CHECK(IsPowerOfTwo(REDZONE)); - CHECK(REDZONE >= SHADOW_GRANULARITY); - PoisonShadowPartialRightRedzone(mem, size, REDZONE, - kAsanHeapRightRedzoneMagic); -} - -static u8 *MmapNewPagesAndPoisonShadow(uptr size) { - CHECK(IsAligned(size, GetPageSizeCached())); - u8 *res = (u8*)MmapOrDie(size, __FUNCTION__); - PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic); - if (flags()->debug) { - Printf("ASAN_MMAP: [%p, %p)\n", res, res + size); - } - return res; -} - -// Every chunk of memory allocated by this allocator can be in one of 3 states: -// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. -// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. -// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. -// -// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not -// the beginning of a AsanChunk (in which the actual chunk resides at -// this - this->used_size). -// -// The magic numbers for the enum values are taken randomly. -enum { - CHUNK_AVAILABLE = 0x57, - CHUNK_ALLOCATED = 0x32, - CHUNK_QUARANTINE = 0x19, - CHUNK_MEMALIGN = 0xDC -}; - -struct ChunkBase { - // First 8 bytes. - uptr chunk_state : 8; - uptr alloc_tid : 24; - uptr size_class : 8; - uptr free_tid : 24; - - // Second 8 bytes. 
- uptr alignment_log : 8; - uptr alloc_type : 2; - uptr used_size : FIRST_32_SECOND_64(32, 54); // Size requested by the user. - - // This field may overlap with the user area and thus should not - // be used while the chunk is in CHUNK_ALLOCATED state. - AsanChunk *next; - - // Typically the beginning of the user-accessible memory is 'this'+REDZONE - // and is also aligned by REDZONE. However, if the memory is allocated - // by memalign, the alignment might be higher and the user-accessible memory - // starts at the first properly aligned address after 'this'. - uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); } - uptr Size() { return SizeClassToSize(size_class); } - u8 SizeClass() { return size_class; } -}; - -struct AsanChunk: public ChunkBase { - u32 *compressed_alloc_stack() { - return (u32*)((uptr)this + sizeof(ChunkBase)); - } - u32 *compressed_free_stack() { - return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase))); - } - - // The left redzone after the ChunkBase is given to the alloc stack trace. - uptr compressed_alloc_stack_size() { - if (REDZONE < sizeof(ChunkBase)) return 0; - return (REDZONE - sizeof(ChunkBase)) / sizeof(u32); - } - uptr compressed_free_stack_size() { - if (REDZONE < sizeof(ChunkBase)) return 0; - return (REDZONE) / sizeof(u32); - } -}; - -uptr AsanChunkView::Beg() { return chunk_->Beg(); } -uptr AsanChunkView::End() { return Beg() + UsedSize(); } -uptr AsanChunkView::UsedSize() { return chunk_->used_size; } -uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; } -uptr AsanChunkView::FreeTid() { return chunk_->free_tid; } - -void AsanChunkView::GetAllocStack(StackTrace *stack) { - StackTrace::UncompressStack(stack, chunk_->compressed_alloc_stack(), - chunk_->compressed_alloc_stack_size()); -} - -void AsanChunkView::GetFreeStack(StackTrace *stack) { - StackTrace::UncompressStack(stack, chunk_->compressed_free_stack(), - chunk_->compressed_free_stack_size()); -} - -static AsanChunk *PtrToChunk(uptr ptr) { - AsanChunk *m = (AsanChunk*)(ptr - REDZONE); - if (m->chunk_state == CHUNK_MEMALIGN) { - m = (AsanChunk*)((uptr)m - m->used_size); - } - return m; -} - -void AsanChunkFifoList::PushList(AsanChunkFifoList *q) { - CHECK(q->size() > 0); - size_ += q->size(); - append_back(q); - q->clear(); -} - -void AsanChunkFifoList::Push(AsanChunk *n) { - push_back(n); - size_ += n->Size(); -} - -// Interesting performance observation: this function takes up to 15% of overal -// allocator time. That's because *first_ has been evicted from cache long time -// ago. Not sure if we can or want to do anything with this. -AsanChunk *AsanChunkFifoList::Pop() { - CHECK(first_); - AsanChunk *res = front(); - size_ -= res->Size(); - pop_front(); - return res; -} - -// All pages we ever allocated. 
-struct PageGroup { - uptr beg; - uptr end; - uptr size_of_chunk; - uptr last_chunk; - bool InRange(uptr addr) { - return addr >= beg && addr < end; - } -}; - -class MallocInfo { - public: - explicit MallocInfo(LinkerInitialized x) : mu_(x) { } - - AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) { - AsanChunk *m = 0; - AsanChunk **fl = &free_lists_[size_class]; - { - BlockingMutexLock lock(&mu_); - for (uptr i = 0; i < n_chunks; i++) { - if (!(*fl)) { - *fl = GetNewChunks(size_class); - } - AsanChunk *t = *fl; - *fl = t->next; - t->next = m; - CHECK(t->chunk_state == CHUNK_AVAILABLE); - m = t; - } - } - return m; - } - - void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x, - bool eat_free_lists) { - CHECK(flags()->quarantine_size > 0); - BlockingMutexLock lock(&mu_); - AsanChunkFifoList *q = &x->quarantine_; - if (q->size() > 0) { - quarantine_.PushList(q); - while (quarantine_.size() > (uptr)flags()->quarantine_size) { - QuarantinePop(); - } - } - if (eat_free_lists) { - for (uptr size_class = 0; size_class < kNumberOfSizeClasses; - size_class++) { - AsanChunk *m = x->free_lists_[size_class]; - while (m) { - AsanChunk *t = m->next; - m->next = free_lists_[size_class]; - free_lists_[size_class] = m; - m = t; - } - x->free_lists_[size_class] = 0; - } - } - } - - void BypassThreadLocalQuarantine(AsanChunk *chunk) { - BlockingMutexLock lock(&mu_); - quarantine_.Push(chunk); - } - - AsanChunk *FindChunkByAddr(uptr addr) { - BlockingMutexLock lock(&mu_); - return FindChunkByAddrUnlocked(addr); - } - - uptr AllocationSize(uptr ptr) { - if (!ptr) return 0; - BlockingMutexLock lock(&mu_); - - // Make sure this is our chunk and |ptr| actually points to the beginning - // of the allocated memory. - AsanChunk *m = FindChunkByAddrUnlocked(ptr); - if (!m || m->Beg() != ptr) return 0; - - if (m->chunk_state == CHUNK_ALLOCATED) { - return m->used_size; - } else { - return 0; - } - } - - void ForceLock() { - mu_.Lock(); - } - - void ForceUnlock() { - mu_.Unlock(); - } - - void PrintStatus() { - BlockingMutexLock lock(&mu_); - uptr malloced = 0; - - Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ", - quarantine_.size() >> 20, malloced >> 20); - for (uptr j = 1; j < kNumberOfSizeClasses; j++) { - AsanChunk *i = free_lists_[j]; - if (!i) continue; - uptr t = 0; - for (; i; i = i->next) { - t += i->Size(); - } - Printf("%zu:%zu ", j, t >> 20); - } - Printf("\n"); - } - - PageGroup *FindPageGroup(uptr addr) { - BlockingMutexLock lock(&mu_); - return FindPageGroupUnlocked(addr); - } - - private: - PageGroup *FindPageGroupUnlocked(uptr addr) { - int n = atomic_load(&n_page_groups_, memory_order_relaxed); - // If the page groups are not sorted yet, sort them. - if (n_sorted_page_groups_ < n) { - SortArray((uptr*)page_groups_, n); - n_sorted_page_groups_ = n; - } - // Binary search over the page groups. - int beg = 0, end = n; - while (beg < end) { - int med = (beg + end) / 2; - uptr g = (uptr)page_groups_[med]; - if (addr > g) { - // 'g' points to the end of the group, so 'addr' - // may not belong to page_groups_[med] or any previous group. - beg = med + 1; - } else { - // 'addr' may belong to page_groups_[med] or a previous group. - end = med; - } - } - if (beg >= n) - return 0; - PageGroup *g = page_groups_[beg]; - CHECK(g); - if (g->InRange(addr)) - return g; - return 0; - } - - // We have an address between two chunks, and we want to report just one. 
- AsanChunk *ChooseChunk(uptr addr, - AsanChunk *left_chunk, AsanChunk *right_chunk) { - // Prefer an allocated chunk or a chunk from quarantine. - if (left_chunk->chunk_state == CHUNK_AVAILABLE && - right_chunk->chunk_state != CHUNK_AVAILABLE) - return right_chunk; - if (right_chunk->chunk_state == CHUNK_AVAILABLE && - left_chunk->chunk_state != CHUNK_AVAILABLE) - return left_chunk; - // Choose based on offset. - uptr l_offset = 0, r_offset = 0; - CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); - CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); - if (l_offset < r_offset) - return left_chunk; - return right_chunk; - } - - AsanChunk *FindChunkByAddrUnlocked(uptr addr) { - PageGroup *g = FindPageGroupUnlocked(addr); - if (!g) return 0; - CHECK(g->size_of_chunk); - uptr offset_from_beg = addr - g->beg; - uptr this_chunk_addr = g->beg + - (offset_from_beg / g->size_of_chunk) * g->size_of_chunk; - CHECK(g->InRange(this_chunk_addr)); - AsanChunk *m = (AsanChunk*)this_chunk_addr; - CHECK(m->chunk_state == CHUNK_ALLOCATED || - m->chunk_state == CHUNK_AVAILABLE || - m->chunk_state == CHUNK_QUARANTINE); - uptr offset = 0; - AsanChunkView m_view(m); - if (m_view.AddrIsInside(addr, 1, &offset)) - return m; - - if (m_view.AddrIsAtRight(addr, 1, &offset)) { - if (this_chunk_addr == g->last_chunk) // rightmost chunk - return m; - uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk; - CHECK(g->InRange(right_chunk_addr)); - return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr); - } else { - CHECK(m_view.AddrIsAtLeft(addr, 1, &offset)); - if (this_chunk_addr == g->beg) // leftmost chunk - return m; - uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk; - CHECK(g->InRange(left_chunk_addr)); - return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m); - } - } - - void QuarantinePop() { - CHECK(quarantine_.size() > 0); - AsanChunk *m = quarantine_.Pop(); - CHECK(m); - // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m); - - CHECK(m->chunk_state == CHUNK_QUARANTINE); - m->chunk_state = CHUNK_AVAILABLE; - PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic); - CHECK(m->alloc_tid >= 0); - CHECK(m->free_tid >= 0); - - uptr size_class = m->SizeClass(); - m->next = free_lists_[size_class]; - free_lists_[size_class] = m; - - // Statistics. - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); - thread_stats.real_frees++; - thread_stats.really_freed += m->used_size; - thread_stats.really_freed_redzones += m->Size() - m->used_size; - thread_stats.really_freed_by_size[m->SizeClass()]++; - } - - // Get a list of newly allocated chunks. - AsanChunk *GetNewChunks(u8 size_class) { - uptr size = SizeClassToSize(size_class); - CHECK(IsPowerOfTwo(kMinMmapSize)); - CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0); - uptr mmap_size = Max(size, kMinMmapSize); - uptr n_chunks = mmap_size / size; - CHECK(n_chunks * size == mmap_size); - uptr PageSize = GetPageSizeCached(); - if (size < PageSize) { - // Size is small, just poison the last chunk. - n_chunks--; - } else { - // Size is large, allocate an extra page at right and poison it. - mmap_size += PageSize; - } - CHECK(n_chunks > 0); - u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size); - - // Statistics. 
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); - thread_stats.mmaps++; - thread_stats.mmaped += mmap_size; - thread_stats.mmaped_by_size[size_class] += n_chunks; - - AsanChunk *res = 0; - for (uptr i = 0; i < n_chunks; i++) { - AsanChunk *m = (AsanChunk*)(mem + i * size); - m->chunk_state = CHUNK_AVAILABLE; - m->size_class = size_class; - m->next = res; - res = m; - } - PageGroup *pg = (PageGroup*)(mem + n_chunks * size); - // This memory is already poisoned, no need to poison it again. - pg->beg = (uptr)mem; - pg->end = pg->beg + mmap_size; - pg->size_of_chunk = size; - pg->last_chunk = (uptr)(mem + size * (n_chunks - 1)); - int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed); - CHECK(idx < (int)ARRAY_SIZE(page_groups_)); - page_groups_[idx] = pg; - return res; - } - - AsanChunk *free_lists_[kNumberOfSizeClasses]; - AsanChunkFifoList quarantine_; - BlockingMutex mu_; - - PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize]; - atomic_uint32_t n_page_groups_; - int n_sorted_page_groups_; -}; - -static MallocInfo malloc_info(LINKER_INITIALIZED); - -void AsanThreadLocalMallocStorage::CommitBack() { - malloc_info.SwallowThreadLocalMallocStorage(this, true); -} - -AsanChunkView FindHeapChunkByAddress(uptr address) { - return AsanChunkView(malloc_info.FindChunkByAddr(address)); -} - -static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack, - AllocType alloc_type) { - __asan_init(); - CHECK(stack); - if (size == 0) { - size = 1; // TODO(kcc): do something smarter - } - CHECK(IsPowerOfTwo(alignment)); - uptr rounded_size = RoundUpTo(size, REDZONE); - uptr needed_size = rounded_size + REDZONE; - if (alignment > REDZONE) { - needed_size += alignment; - } - CHECK(IsAligned(needed_size, REDZONE)); - if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { - Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", - (void*)size); - return 0; - } - - u8 size_class = SizeToSizeClass(needed_size); - uptr size_to_allocate = SizeClassToSize(size_class); - CHECK(size_to_allocate >= kMinAllocSize); - CHECK(size_to_allocate >= needed_size); - CHECK(IsAligned(size_to_allocate, REDZONE)); - - if (flags()->verbosity >= 3) { - Printf("Allocate align: %zu size: %zu class: %u real: %zu\n", - alignment, size, size_class, size_to_allocate); - } - - AsanThread *t = asanThreadRegistry().GetCurrent(); - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); - // Statistics - thread_stats.mallocs++; - thread_stats.malloced += size; - thread_stats.malloced_redzones += size_to_allocate - size; - thread_stats.malloced_by_size[size_class]++; - - AsanChunk *m = 0; - if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) { - // get directly from global storage. - m = malloc_info.AllocateChunks(size_class, 1); - thread_stats.malloc_large++; - } else { - // get from the thread-local storage. 
- AsanChunk **fl = &t->malloc_storage().free_lists_[size_class]; - if (!*fl) { - uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate; - *fl = malloc_info.AllocateChunks(size_class, n_new_chunks); - thread_stats.malloc_small_slow++; - } - m = *fl; - *fl = (*fl)->next; - } - CHECK(m); - CHECK(m->chunk_state == CHUNK_AVAILABLE); - m->chunk_state = CHUNK_ALLOCATED; - m->alloc_type = alloc_type; - m->next = 0; - CHECK(m->Size() == size_to_allocate); - uptr addr = (uptr)m + REDZONE; - CHECK(addr <= (uptr)m->compressed_free_stack()); - - if (alignment > REDZONE && (addr & (alignment - 1))) { - addr = RoundUpTo(addr, alignment); - CHECK((addr & (alignment - 1)) == 0); - AsanChunk *p = (AsanChunk*)(addr - REDZONE); - p->chunk_state = CHUNK_MEMALIGN; - p->used_size = (uptr)p - (uptr)m; - m->alignment_log = Log2(alignment); - CHECK(m->Beg() == addr); - } else { - m->alignment_log = Log2(REDZONE); - } - CHECK(m == PtrToChunk(addr)); - m->used_size = size; - CHECK(m->Beg() == addr); - m->alloc_tid = t ? t->tid() : 0; - m->free_tid = kInvalidTid; - StackTrace::CompressStack(stack, m->compressed_alloc_stack(), - m->compressed_alloc_stack_size()); - PoisonShadow(addr, rounded_size, 0); - if (size < rounded_size) { - PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE, - size & (REDZONE - 1)); - } - if (size <= (uptr)(flags()->max_malloc_fill_size)) { - REAL(memset)((void*)addr, 0, rounded_size); - } - return (u8*)addr; -} - -static void Deallocate(u8 *ptr, StackTrace *stack, AllocType alloc_type) { - if (!ptr) return; - CHECK(stack); - - if (flags()->debug) { - CHECK(malloc_info.FindPageGroup((uptr)ptr)); - } - - // Printf("Deallocate %p\n", ptr); - AsanChunk *m = PtrToChunk((uptr)ptr); - - // Flip the chunk_state atomically to avoid race on double-free. - u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE, - memory_order_acq_rel); - - if (old_chunk_state == CHUNK_QUARANTINE) { - ReportDoubleFree((uptr)ptr, stack); - } else if (old_chunk_state != CHUNK_ALLOCATED) { - ReportFreeNotMalloced((uptr)ptr, stack); - } - CHECK(old_chunk_state == CHUNK_ALLOCATED); - if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch) - ReportAllocTypeMismatch((uptr)ptr, stack, - (AllocType)m->alloc_type, (AllocType)alloc_type); - // With REDZONE==16 m->next is in the user area, otherwise it should be 0. - CHECK(REDZONE <= 16 || !m->next); - CHECK(m->free_tid == kInvalidTid); - CHECK(m->alloc_tid >= 0); - AsanThread *t = asanThreadRegistry().GetCurrent(); - m->free_tid = t ? t->tid() : 0; - StackTrace::CompressStack(stack, m->compressed_free_stack(), - m->compressed_free_stack_size()); - uptr rounded_size = RoundUpTo(m->used_size, REDZONE); - PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic); - - // Statistics. - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); - thread_stats.frees++; - thread_stats.freed += m->used_size; - thread_stats.freed_by_size[m->SizeClass()]++; - - CHECK(m->chunk_state == CHUNK_QUARANTINE); - - if (t) { - AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); - ms->quarantine_.Push(m); - - if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) { - malloc_info.SwallowThreadLocalMallocStorage(ms, false); - } - } else { - malloc_info.BypassThreadLocalQuarantine(m); - } -} - -static u8 *Reallocate(u8 *old_ptr, uptr new_size, - StackTrace *stack) { - CHECK(old_ptr && new_size); - - // Statistics. 
- AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); - thread_stats.reallocs++; - thread_stats.realloced += new_size; - - AsanChunk *m = PtrToChunk((uptr)old_ptr); - CHECK(m->chunk_state == CHUNK_ALLOCATED); - uptr old_size = m->used_size; - uptr memcpy_size = Min(new_size, old_size); - u8 *new_ptr = Allocate(0, new_size, stack, FROM_MALLOC); - if (new_ptr) { - CHECK(REAL(memcpy) != 0); - REAL(memcpy)(new_ptr, old_ptr, memcpy_size); - Deallocate(old_ptr, stack, FROM_MALLOC); - } - return new_ptr; -} - -} // namespace __asan - -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -// Provide default (no-op) implementation of malloc hooks. -extern "C" { -SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE -void __asan_malloc_hook(void *ptr, uptr size) { - (void)ptr; - (void)size; -} -SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE -void __asan_free_hook(void *ptr) { - (void)ptr; -} -} // extern "C" -#endif - -namespace __asan { - -void PrintInternalAllocatorStats() { -} - -SANITIZER_INTERFACE_ATTRIBUTE -void *asan_memalign(uptr alignment, uptr size, StackTrace *stack, - AllocType alloc_type) { - void *ptr = (void*)Allocate(alignment, size, stack, alloc_type); - ASAN_MALLOC_HOOK(ptr, size); - return ptr; -} - -SANITIZER_INTERFACE_ATTRIBUTE -void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) { - ASAN_FREE_HOOK(ptr); - Deallocate((u8*)ptr, stack, alloc_type); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void *asan_malloc(uptr size, StackTrace *stack) { - void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC); - ASAN_MALLOC_HOOK(ptr, size); - return ptr; -} - -void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) { - void *ptr = (void*)Allocate(0, nmemb * size, stack, FROM_MALLOC); - if (ptr) - REAL(memset)(ptr, 0, nmemb * size); - ASAN_MALLOC_HOOK(ptr, size); - return ptr; -} - -void *asan_realloc(void *p, uptr size, StackTrace *stack) { - if (p == 0) { - void *ptr = (void*)Allocate(0, size, stack, FROM_MALLOC); - ASAN_MALLOC_HOOK(ptr, size); - return ptr; - } else if (size == 0) { - ASAN_FREE_HOOK(p); - Deallocate((u8*)p, stack, FROM_MALLOC); - return 0; - } - return Reallocate((u8*)p, size, stack); -} - -void *asan_valloc(uptr size, StackTrace *stack) { - void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack, FROM_MALLOC); - ASAN_MALLOC_HOOK(ptr, size); - return ptr; -} - -void *asan_pvalloc(uptr size, StackTrace *stack) { - uptr PageSize = GetPageSizeCached(); - size = RoundUpTo(size, PageSize); - if (size == 0) { - // pvalloc(0) should allocate one page. 
- size = PageSize; - } - void *ptr = (void*)Allocate(PageSize, size, stack, FROM_MALLOC); - ASAN_MALLOC_HOOK(ptr, size); - return ptr; -} - -int asan_posix_memalign(void **memptr, uptr alignment, uptr size, - StackTrace *stack) { - void *ptr = Allocate(alignment, size, stack, FROM_MALLOC); - CHECK(IsAligned((uptr)ptr, alignment)); - ASAN_MALLOC_HOOK(ptr, size); - *memptr = ptr; - return 0; -} - -uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) { - CHECK(stack); - if (ptr == 0) return 0; - uptr usable_size = malloc_info.AllocationSize((uptr)ptr); - if (flags()->check_malloc_usable_size && (usable_size == 0)) { - ReportMallocUsableSizeNotOwned((uptr)ptr, stack); - } - return usable_size; -} - -uptr asan_mz_size(const void *ptr) { - return malloc_info.AllocationSize((uptr)ptr); -} - -void asan_mz_force_lock() { - malloc_info.ForceLock(); -} - -void asan_mz_force_unlock() { - malloc_info.ForceUnlock(); -} - -} // namespace __asan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -// ASan allocator doesn't reserve extra bytes, so normally we would -// just return "size". -uptr __asan_get_estimated_allocated_size(uptr size) { - if (size == 0) return 1; - return Min(size, kMaxAllowedMallocSize); -} - -bool __asan_get_ownership(const void *p) { - return malloc_info.AllocationSize((uptr)p) > 0; -} - -uptr __asan_get_allocated_size(const void *p) { - if (p == 0) return 0; - uptr allocated_size = malloc_info.AllocationSize((uptr)p); - // Die if p is not malloced or if it is already freed. - if (allocated_size == 0) { - GET_STACK_TRACE_FATAL_HERE; - ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack); - } - return allocated_size; -} -#endif // ASAN_ALLOCATOR_VERSION diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h index cca24edad81f..f817ce352ee2 100644 --- a/lib/asan/asan_allocator.h +++ b/lib/asan/asan_allocator.h @@ -9,7 +9,7 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_allocator.cc. +// ASan-private header for asan_allocator2.cc. //===----------------------------------------------------------------------===// #ifndef ASAN_ALLOCATOR_H @@ -19,18 +19,6 @@ #include "asan_interceptors.h" #include "sanitizer_common/sanitizer_list.h" -// We are in the process of transitioning from the old allocator (version 1) -// to a new one (version 2). The change is quite intrusive so both allocators -// will co-exist in the source base for a while. The actual allocator is chosen -// at build time by redefining this macro. 
-#ifndef ASAN_ALLOCATOR_VERSION -# if ASAN_LINUX && !ASAN_ANDROID -# define ASAN_ALLOCATOR_VERSION 2 -# else -# define ASAN_ALLOCATOR_VERSION 1 -# endif -#endif // ASAN_ALLOCATOR_VERSION - namespace __asan { enum AllocType { @@ -42,6 +30,8 @@ enum AllocType { static const uptr kNumberOfSizeClasses = 255; struct AsanChunk; +void InitializeAllocator(); + class AsanChunkView { public: explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {} @@ -53,14 +43,14 @@ class AsanChunkView { uptr FreeTid(); void GetAllocStack(StackTrace *stack); void GetFreeStack(StackTrace *stack); - bool AddrIsInside(uptr addr, uptr access_size, uptr *offset) { + bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) { if (addr >= Beg() && (addr + access_size) <= End()) { *offset = addr - Beg(); return true; } return false; } - bool AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) { + bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) { (void)access_size; if (addr < Beg()) { *offset = Beg() - addr; @@ -68,12 +58,9 @@ class AsanChunkView { } return false; } - bool AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) { - if (addr + access_size >= End()) { - if (addr <= End()) - *offset = 0; - else - *offset = addr - End(); + bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) { + if (addr + access_size > End()) { + *offset = addr - End(); return true; } return false; @@ -104,109 +91,17 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> { struct AsanThreadLocalMallocStorage { explicit AsanThreadLocalMallocStorage(LinkerInitialized x) -#if ASAN_ALLOCATOR_VERSION == 1 - : quarantine_(x) -#endif { } AsanThreadLocalMallocStorage() { CHECK(REAL(memset)); REAL(memset)(this, 0, sizeof(AsanThreadLocalMallocStorage)); } -#if ASAN_ALLOCATOR_VERSION == 1 - AsanChunkFifoList quarantine_; - AsanChunk *free_lists_[kNumberOfSizeClasses]; -#else uptr quarantine_cache[16]; uptr allocator2_cache[96 * (512 * 8 + 16)]; // Opaque. -#endif void CommitBack(); }; -// Fake stack frame contains local variables of one function. -// This struct should fit into a stack redzone (32 bytes). -struct FakeFrame { - uptr magic; // Modified by the instrumented code. - uptr descr; // Modified by the instrumented code. - FakeFrame *next; - u64 real_stack : 48; - u64 size_minus_one : 16; -}; - -struct FakeFrameFifo { - public: - void FifoPush(FakeFrame *node); - FakeFrame *FifoPop(); - private: - FakeFrame *first_, *last_; -}; - -class FakeFrameLifo { - public: - void LifoPush(FakeFrame *node) { - node->next = top_; - top_ = node; - } - void LifoPop() { - CHECK(top_); - top_ = top_->next; - } - FakeFrame *top() { return top_; } - private: - FakeFrame *top_; -}; - -// For each thread we create a fake stack and place stack objects on this fake -// stack instead of the real stack. The fake stack is not really a stack but -// a fast malloc-like allocator so that when a function exits the fake stack -// is not poped but remains there for quite some time until gets used again. -// So, we poison the objects on the fake stack when function returns. -// It helps us find use-after-return bugs. -// We can not rely on __asan_stack_free being called on every function exit, -// so we maintain a lifo list of all current fake frames and update it on every -// call to __asan_stack_malloc. 
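The comment above is the core of the design; as a standalone illustration (plain C++, not taken from the patch), the bug class it targets looks like this. With locals placed on the fake stack, a returned frame stays poisoned instead of being silently reused, so the dangling read below can be reported as a use-after-return.

// Sketch only: the address of a local escapes its frame.
static int *escape_local() {
  int local = 42;
  return &local;   // dangling the moment escape_local() returns
}

int read_after_return() {
  int *p = escape_local();
  return *p;       // the kind of invalid read the fake stack helps catch
}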
-class FakeStack { - public: - FakeStack(); - explicit FakeStack(LinkerInitialized) {} - void Init(uptr stack_size); - void StopUsingFakeStack() { alive_ = false; } - void Cleanup(); - uptr AllocateStack(uptr size, uptr real_stack); - static void OnFree(uptr ptr, uptr size, uptr real_stack); - // Return the bottom of the maped region. - uptr AddrIsInFakeStack(uptr addr); - bool StackSize() { return stack_size_; } - - private: - static const uptr kMinStackFrameSizeLog = 9; // Min frame is 512B. - static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K. - static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog; - static const uptr kNumberOfSizeClasses = - kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1; - - bool AddrIsInSizeClass(uptr addr, uptr size_class); - - // Each size class should be large enough to hold all frames. - uptr ClassMmapSize(uptr size_class); - - uptr ClassSize(uptr size_class) { - return 1UL << (size_class + kMinStackFrameSizeLog); - } - - void DeallocateFrame(FakeFrame *fake_frame); - - uptr ComputeSizeClass(uptr alloc_size); - void AllocateOneSizeClass(uptr size_class); - - uptr stack_size_; - bool alive_; - - uptr allocated_size_classes_[kNumberOfSizeClasses]; - FakeFrameFifo size_classes_[kNumberOfSizeClasses]; - FakeFrameLifo call_stack_; -}; - void *asan_memalign(uptr alignment, uptr size, StackTrace *stack, AllocType alloc_type); void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type); @@ -227,50 +122,5 @@ void asan_mz_force_unlock(); void PrintInternalAllocatorStats(); -// Log2 and RoundUpToPowerOfTwo should be inlined for performance. -#if defined(_WIN32) && !defined(__clang__) -extern "C" { -unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT -unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT -#if defined(_WIN64) -unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT -unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT -#endif -} -#endif - -static inline uptr Log2(uptr x) { - CHECK(IsPowerOfTwo(x)); -#if !defined(_WIN32) || defined(__clang__) - return __builtin_ctzl(x); -#elif defined(_WIN64) - unsigned long ret; // NOLINT - _BitScanForward64(&ret, x); - return ret; -#else - unsigned long ret; // NOLINT - _BitScanForward(&ret, x); - return ret; -#endif -} - -static inline uptr RoundUpToPowerOfTwo(uptr size) { - CHECK(size); - if (IsPowerOfTwo(size)) return size; - - unsigned long up; // NOLINT -#if !defined(_WIN32) || defined(__clang__) - up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size); -#elif defined(_WIN64) - _BitScanReverse64(&up, size); -#else - _BitScanReverse(&up, size); -#endif - CHECK(size < (1ULL << (up + 1))); - CHECK(size > (1ULL << up)); - return 1UL << (up + 1); -} - - } // namespace __asan #endif // ASAN_ALLOCATOR_H diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc index 42d8b29afd6b..d74aa553a288 100644 --- a/lib/asan/asan_allocator2.cc +++ b/lib/asan/asan_allocator2.cc @@ -13,21 +13,20 @@ // This variant uses the allocator from sanitizer_common, i.e. the one shared // with ThreadSanitizer and MemorySanitizer. // -// Status: under development, not enabled by default yet. 
//===----------------------------------------------------------------------===// #include "asan_allocator.h" -#if ASAN_ALLOCATOR_VERSION == 2 #include "asan_mapping.h" +#include "asan_poisoning.h" #include "asan_report.h" #include "asan_thread.h" -#include "asan_thread_registry.h" -#include "sanitizer/asan_interface.h" #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_list.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_quarantine.h" +#include "lsan/lsan_common.h" namespace __asan { @@ -35,7 +34,7 @@ struct AsanMapUnmapCallback { void OnMap(uptr p, uptr size) const { PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); // Statistics. - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mmaps++; thread_stats.mmaped += size; } @@ -50,23 +49,32 @@ struct AsanMapUnmapCallback { uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size); FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); // Statistics. - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.munmaps++; thread_stats.munmaped += size; } }; #if SANITIZER_WORDSIZE == 64 +#if defined(__powerpc64__) +const uptr kAllocatorSpace = 0xa0000000000ULL; +const uptr kAllocatorSize = 0x20000000000ULL; // 2T. +#else const uptr kAllocatorSpace = 0x600000000000ULL; -const uptr kAllocatorSize = 0x10000000000ULL; // 1T. +const uptr kAllocatorSize = 0x40000000000ULL; // 4T. +#endif typedef DefaultSizeClassMap SizeClassMap; typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/, SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator; #elif SANITIZER_WORDSIZE == 32 static const u64 kAddressSpaceSize = 1ULL << 32; typedef CompactSizeClassMap SizeClassMap; +static const uptr kRegionSizeLog = 20; +static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog; typedef SizeClassAllocator32<0, kAddressSpaceSize, 16, - SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator; + SizeClassMap, kRegionSizeLog, + FlatByteMap<kFlatByteMapSize>, + AsanMapUnmapCallback> PrimaryAllocator; #endif typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; @@ -91,8 +99,6 @@ static const uptr kMaxAllowedMallocSize = static const uptr kMaxThreadLocalQuarantine = FIRST_32_SECOND_64(1 << 18, 1 << 20); -static const uptr kReturnOnZeroMalloc = 2048; // Zero page is protected. - // Every chunk of memory allocated by this allocator can be in one of 3 states: // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. // CHUNK_ALLOCATED: the chunk is allocated and not yet freed. @@ -114,7 +120,7 @@ static u32 RZSize2Log(u32 rz_size) { CHECK_GE(rz_size, 16); CHECK_LE(rz_size, 2048); CHECK(IsPowerOfTwo(rz_size)); - u32 res = __builtin_ctz(rz_size) - 4; + u32 res = Log2(rz_size) - 4; CHECK_EQ(rz_size, RZLog2Size(res)); return res; } @@ -158,6 +164,7 @@ struct ChunkHeader { u32 from_memalign : 1; u32 alloc_type : 2; u32 rz_log : 3; + u32 lsan_tag : 2; // 2-nd 8 bytes // This field is used for small sizes. For large sizes it is equal to // SizeClassMap::kMaxSize and the actual size is stored in the @@ -168,7 +175,6 @@ struct ChunkHeader { struct ChunkBase : ChunkHeader { // Header2, intersects with user memory. 
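For orientation, rz_log above is a 3-bit field, and the RZSize2Log/RZLog2Size pair checked earlier in this file maps it to a power-of-two redzone between 16 and 2048 bytes, i.e. 16 << rz_log. A standalone sketch of that encoding (illustrative names, not the runtime's own helpers):

#include <cassert>
#include <cstdint>

// rz_log in [0, 7] encodes a redzone of 16 << rz_log bytes (16 .. 2048).
static uint32_t RedzoneLog2Size(uint32_t rz_log) { return 16u << rz_log; }
static uint32_t RedzoneSize2Log(uint32_t rz_size) {
  uint32_t log = 0;
  while ((16u << log) < rz_size) ++log;  // assumes rz_size is a power of two >= 16
  return log;
}

int main() {
  for (uint32_t log = 0; log <= 7; ++log)
    assert(RedzoneSize2Log(RedzoneLog2Size(log)) == log);  // the mapping round-trips
  return 0;
}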
- AsanChunk *next; u32 free_context_id; }; @@ -189,7 +195,8 @@ struct AsanChunk: ChunkBase { return allocator.GetBlockBegin(reinterpret_cast<void *>(this)); return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); } - // We store the alloc/free stack traces in the chunk itself. + // If we don't use stack depot, we store the alloc/free stack traces + // in the chunk itself. u32 *AllocStackBeg() { return (u32*)(Beg() - RZLog2Size(rz_log)); } @@ -205,6 +212,9 @@ struct AsanChunk: ChunkBase { uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY); return (available - kChunkHeader2Size) / sizeof(u32); } + bool AddrIsInside(uptr addr) { + return (addr >= Beg()) && (addr < Beg() + UsedSize()); + } }; uptr AsanChunkView::Beg() { return chunk_->Beg(); } @@ -258,8 +268,8 @@ struct QuarantineCallback { } void Recycle(AsanChunk *m) { - CHECK(m->chunk_state == CHUNK_QUARANTINE); - m->chunk_state = CHUNK_AVAILABLE; + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); CHECK_NE(m->alloc_tid, kInvalidTid); CHECK_NE(m->free_tid, kInvalidTid); PoisonShadow(m->Beg(), @@ -273,7 +283,7 @@ struct QuarantineCallback { } // Statistics. - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.real_frees++; thread_stats.really_freed += m->UsedSize(); @@ -291,34 +301,32 @@ struct QuarantineCallback { AllocatorCache *cache_; }; -static void Init() { - static int inited = 0; - if (inited) return; - __asan_init(); - inited = true; // this must happen before any threads are created. +void InitializeAllocator() { allocator.Init(); quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); } static void *Allocate(uptr size, uptr alignment, StackTrace *stack, - AllocType alloc_type) { - Init(); + AllocType alloc_type, bool can_fill) { + if (!asan_inited) + __asan_init(); + Flags &fl = *flags(); CHECK(stack); const uptr min_alignment = SHADOW_GRANULARITY; if (alignment < min_alignment) alignment = min_alignment; if (size == 0) { - if (alignment <= kReturnOnZeroMalloc) - return reinterpret_cast<void *>(kReturnOnZeroMalloc); - else - return 0; // 0 bytes with large alignment requested. Just return 0. + // We'd be happy to avoid allocating memory for zero-size requests, but + // some programs/tests depend on this behavior and assume that malloc would + // not return NULL even for zero-size allocations. Moreover, it looks like + // operator new should never return NULL, and results of consecutive "new" + // calls must be different even if the allocated size is zero. 
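Stated as code, the expectation that the size = 1 bump below preserves looks roughly like this (a sketch; the C standard itself permits malloc(0) to return a null pointer, this is the stronger behavior some programs and tests assume):

#include <cstdlib>

int main() {
  void *a = std::malloc(0);
  void *b = std::malloc(0);
  // Distinct, non-null results even for zero-size requests.
  bool as_assumed = a != nullptr && b != nullptr && a != b;
  std::free(a);
  std::free(b);
  return as_assumed ? 0 : 1;
}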
+ size = 1; } CHECK(IsPowerOfTwo(alignment)); uptr rz_log = ComputeRZLog(size); uptr rz_size = RZLog2Size(rz_log); - uptr rounded_size = RoundUpTo(size, alignment); - if (rounded_size < kChunkHeader2Size) - rounded_size = kChunkHeader2Size; + uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); uptr needed_size = rounded_size + rz_size; if (alignment > min_alignment) needed_size += alignment; @@ -336,7 +344,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack, return 0; } - AsanThread *t = asanThreadRegistry().GetCurrent(); + AsanThread *t = GetCurrentThread(); void *allocated; if (t) { AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); @@ -358,7 +366,6 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack, CHECK_LE(user_end, alloc_end); uptr chunk_beg = user_beg - kChunkHeaderSize; AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); - m->chunk_state = CHUNK_ALLOCATED; m->alloc_type = alloc_type; m->rz_log = rz_log; u32 alloc_tid = t ? t->tid() : 0; @@ -384,7 +391,7 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack, meta[1] = chunk_beg; } - if (flags()->use_stack_depot) { + if (fl.use_stack_depot) { m->alloc_context_id = StackDepotPut(stack->trace, stack->size); } else { m->alloc_context_id = 0; @@ -396,12 +403,12 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack, if (size_rounded_down_to_granularity) PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); // Deal with the end of the region if size is not aligned to granularity. - if (size != size_rounded_down_to_granularity && flags()->poison_heap) { + if (size != size_rounded_down_to_granularity && fl.poison_heap) { u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity); *shadow = size & (SHADOW_GRANULARITY - 1); } - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.mallocs++; thread_stats.malloced += size; thread_stats.malloced_redzones += needed_size - size; @@ -411,25 +418,42 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack, thread_stats.malloc_large++; void *res = reinterpret_cast<void *>(user_beg); + if (can_fill && fl.max_malloc_fill_size) { + uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); + REAL(memset)(res, fl.malloc_fill_byte, fill_size); + } + // Must be the last mutation of metadata in this function. + atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); ASAN_MALLOC_HOOK(res, size); return res; } -static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) { - uptr p = reinterpret_cast<uptr>(ptr); - if (p == 0 || p == kReturnOnZeroMalloc) return; - uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); - +static void AtomicallySetQuarantineFlag(AsanChunk *m, + void *ptr, StackTrace *stack) { + u8 old_chunk_state = CHUNK_ALLOCATED; // Flip the chunk_state atomically to avoid race on double-free. 
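The compare-and-swap in the lines that follow can be read as the idiom below (illustrative names, std::atomic in place of the sanitizer's atomic wrappers): only one caller can win the allocated-to-quarantined transition, and a losing caller sees the prior state, which is what distinguishes a double free from a free of memory that was never allocated.

#include <atomic>
#include <cstdint>

enum : uint8_t { kAllocated = 1, kQuarantined = 2 };

// Returns true iff this caller performed the single valid free.
// On failure, 'observed' holds the state found instead of kAllocated,
// e.g. kQuarantined when the chunk was already freed once.
bool TryMarkQuarantined(std::atomic<uint8_t> &state, uint8_t &observed) {
  observed = kAllocated;
  return state.compare_exchange_strong(observed, kQuarantined,
                                       std::memory_order_acquire);
}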
- u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE, - memory_order_relaxed); - - if (old_chunk_state == CHUNK_QUARANTINE) - ReportDoubleFree((uptr)ptr, stack); - else if (old_chunk_state != CHUNK_ALLOCATED) - ReportFreeNotMalloced((uptr)ptr, stack); - CHECK(old_chunk_state == CHUNK_ALLOCATED); + if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, + CHUNK_QUARANTINE, memory_order_acquire)) { + if (old_chunk_state == CHUNK_QUARANTINE) + ReportDoubleFree((uptr)ptr, stack); + else + ReportFreeNotMalloced((uptr)ptr, stack); + } + CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); +} + +// Expects the chunk to already be marked as quarantined by using +// AtomicallySetQuarantineFlag. +static void QuarantineChunk(AsanChunk *m, void *ptr, + StackTrace *stack, AllocType alloc_type) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + + // FIXME: if the free hook produces an ASan report (e.g. due to a bug), + // printing the report may crash as the AsanChunk free-related fields have not + // been updated yet. We might need to introduce yet another chunk state to + // handle this correctly, but don't want to yet. + ASAN_FREE_HOOK(ptr); + if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch) ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, (AllocType)alloc_type); @@ -437,7 +461,7 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) { CHECK_GE(m->alloc_tid, 0); if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. CHECK_EQ(m->free_tid, kInvalidTid); - AsanThread *t = asanThreadRegistry().GetCurrent(); + AsanThread *t = GetCurrentThread(); m->free_tid = t ? t->tid() : 0; if (flags()->use_stack_depot) { m->free_context_id = StackDepotPut(stack->trace, stack->size); @@ -445,13 +469,12 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) { m->free_context_id = 0; StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize()); } - CHECK(m->chunk_state == CHUNK_QUARANTINE); // Poison the region. PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), kAsanHeapFreeMagic); - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.frees++; thread_stats.freed += m->UsedSize(); @@ -467,8 +490,17 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) { quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m, m->UsedSize()); } +} - ASAN_FREE_HOOK(ptr); +static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) { + uptr p = reinterpret_cast<uptr>(ptr); + if (p == 0) return; + + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + // Must mark the chunk as quarantined before any changes to its metadata. + AtomicallySetQuarantineFlag(m, ptr, stack); + QuarantineChunk(m, ptr, stack, alloc_type); } static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) { @@ -477,18 +509,21 @@ static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) { uptr chunk_beg = p - kChunkHeaderSize; AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); - AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats(); + AsanStats &thread_stats = GetCurrentThreadStats(); thread_stats.reallocs++; thread_stats.realloced += new_size; - CHECK(m->chunk_state == CHUNK_ALLOCATED); + // Must mark the chunk as quarantined before any changes to its metadata. 
+ // This also ensures that other threads can't deallocate it in the meantime. + AtomicallySetQuarantineFlag(m, old_ptr, stack); + uptr old_size = m->UsedSize(); uptr memcpy_size = Min(new_size, old_size); - void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC); + void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); if (new_ptr) { - CHECK(REAL(memcpy) != 0); + CHECK_NE(REAL(memcpy), (void*)0); REAL(memcpy)(new_ptr, old_ptr, memcpy_size); - Deallocate(old_ptr, stack, FROM_MALLOC); + QuarantineChunk(m, old_ptr, stack, FROM_MALLOC); } return new_ptr; } @@ -548,7 +583,7 @@ AsanChunk *ChooseChunk(uptr addr, return right_chunk; } // Same chunk_state: choose based on offset. - uptr l_offset = 0, r_offset = 0; + sptr l_offset = 0, r_offset = 0; CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); if (l_offset < r_offset) @@ -559,7 +594,7 @@ AsanChunk *ChooseChunk(uptr addr, AsanChunkView FindHeapChunkByAddress(uptr addr) { AsanChunk *m1 = GetAsanChunkByAddr(addr); if (!m1) return AsanChunkView(m1); - uptr offset = 0; + sptr offset = 0; if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { // The address is in the chunk's left redzone, so maybe it is actually // a right buffer overflow from the other chunk to the left. @@ -589,7 +624,7 @@ void PrintInternalAllocatorStats() { SANITIZER_INTERFACE_ATTRIBUTE void *asan_memalign(uptr alignment, uptr size, StackTrace *stack, AllocType alloc_type) { - return Allocate(size, alignment, stack, alloc_type); + return Allocate(size, alignment, stack, alloc_type, true); } SANITIZER_INTERFACE_ATTRIBUTE @@ -599,19 +634,22 @@ void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) { SANITIZER_INTERFACE_ATTRIBUTE void *asan_malloc(uptr size, StackTrace *stack) { - return Allocate(size, 8, stack, FROM_MALLOC); + return Allocate(size, 8, stack, FROM_MALLOC, true); } void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) { - void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC); - if (ptr) + if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0; + void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + // If the memory comes from the secondary allocator no need to clear it + // as it comes directly from mmap. + if (ptr && allocator.FromPrimary(ptr)) REAL(memset)(ptr, 0, nmemb * size); return ptr; } void *asan_realloc(void *p, uptr size, StackTrace *stack) { if (p == 0) - return Allocate(size, 8, stack, FROM_MALLOC); + return Allocate(size, 8, stack, FROM_MALLOC, true); if (size == 0) { Deallocate(p, stack, FROM_MALLOC); return 0; @@ -620,7 +658,7 @@ void *asan_realloc(void *p, uptr size, StackTrace *stack) { } void *asan_valloc(uptr size, StackTrace *stack) { - return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC); + return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); } void *asan_pvalloc(uptr size, StackTrace *stack) { @@ -630,12 +668,12 @@ void *asan_pvalloc(uptr size, StackTrace *stack) { // pvalloc(0) should allocate one page. 
size = PageSize; } - return Allocate(size, PageSize, stack, FROM_MALLOC); + return Allocate(size, PageSize, stack, FROM_MALLOC, true); } int asan_posix_memalign(void **memptr, uptr alignment, uptr size, StackTrace *stack) { - void *ptr = Allocate(size, alignment, stack, FROM_MALLOC); + void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true); CHECK(IsAligned((uptr)ptr, alignment)); *memptr = ptr; return 0; @@ -651,20 +689,96 @@ uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) { } uptr asan_mz_size(const void *ptr) { - UNIMPLEMENTED(); - return 0; + return AllocationSize(reinterpret_cast<uptr>(ptr)); } void asan_mz_force_lock() { - UNIMPLEMENTED(); + allocator.ForceLock(); + fallback_mutex.Lock(); } void asan_mz_force_unlock() { - UNIMPLEMENTED(); + fallback_mutex.Unlock(); + allocator.ForceUnlock(); } } // namespace __asan +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +void LockAllocator() { + __asan::allocator.ForceLock(); +} + +void UnlockAllocator() { + __asan::allocator.ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&__asan::allocator; + *end = *begin + sizeof(__asan::allocator); +} + +void *PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr); + if (!m) return 0; + uptr chunk = m->Beg(); + if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) + return reinterpret_cast<void *>(chunk); + return 0; +} + +void *GetUserBegin(void *p) { + __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(reinterpret_cast<uptr>(p)); + CHECK(m); + return reinterpret_cast<void *>(m->Beg()); +} + +LsanMetadata::LsanMetadata(void *chunk) { + uptr addr = reinterpret_cast<uptr>(chunk); + metadata_ = reinterpret_cast<void *>(addr - __asan::kChunkHeaderSize); +} + +bool LsanMetadata::allocated() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->chunk_state == __asan::CHUNK_ALLOCATED; +} + +ChunkTag LsanMetadata::tag() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return static_cast<ChunkTag>(m->lsan_tag); +} + +void LsanMetadata::set_tag(ChunkTag value) { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + m->lsan_tag = value; +} + +uptr LsanMetadata::requested_size() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->UsedSize(); +} + +u32 LsanMetadata::stack_trace_id() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->alloc_context_id; +} + +template <typename Callable> void ForEachChunk(Callable const &callback) { + __asan::allocator.ForEachChunk(callback); +} +#if CAN_SANITIZE_LEAKS +template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>( + ProcessPlatformSpecificAllocationsCb const &callback); +template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback); +template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback); +template void ForEachChunk<MarkIndirectlyLeakedCb>( + MarkIndirectlyLeakedCb const &callback); +template void ForEachChunk<ClearTagCb>(ClearTagCb const &callback); +#endif // CAN_SANITIZE_LEAKS +} // namespace __lsan + // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT @@ -676,7 +790,7 @@ uptr __asan_get_estimated_allocated_size(uptr size) { bool __asan_get_ownership(const void *p) { uptr ptr = reinterpret_cast<uptr>(p); - return (ptr == 
kReturnOnZeroMalloc) || (AllocationSize(ptr) > 0); + return (AllocationSize(ptr) > 0); } uptr __asan_get_allocated_size(const void *p) { @@ -684,7 +798,7 @@ uptr __asan_get_allocated_size(const void *p) { uptr ptr = reinterpret_cast<uptr>(p); uptr allocated_size = AllocationSize(ptr); // Die if p is not malloced or if it is already freed. - if (allocated_size == 0 && ptr != kReturnOnZeroMalloc) { + if (allocated_size == 0) { GET_STACK_TRACE_FATAL_HERE; ReportAsanGetAllocatedSizeNotOwned(ptr, &stack); } @@ -705,6 +819,3 @@ void __asan_free_hook(void *ptr) { } } // extern "C" #endif - - -#endif // ASAN_ALLOCATOR_VERSION diff --git a/lib/asan/asan_blacklist.txt b/lib/asan/asan_blacklist.txt new file mode 100644 index 000000000000..03da08598d23 --- /dev/null +++ b/lib/asan/asan_blacklist.txt @@ -0,0 +1,10 @@ +# Blacklist for AddressSanitizer. Turns off instrumentation of particular +# functions or sources. Use with care. You may set location of blacklist +# at compile-time using -fsanitize-blacklist=<path> flag. + +# Example usage: +# fun:*bad_function_name* +# src:file_with_tricky_code.cc +# global:*global_with_bad_access_or_initialization* +# global-init:*global_with_initialization_issues* +# global-init-type:*Namespace::ClassName* diff --git a/lib/asan/asan_fake_stack.cc b/lib/asan/asan_fake_stack.cc index 7c5a16312d46..23eebe64e612 100644 --- a/lib/asan/asan_fake_stack.cc +++ b/lib/asan/asan_fake_stack.cc @@ -12,14 +12,13 @@ // FakeStack is used to detect use-after-return bugs. //===----------------------------------------------------------------------===// #include "asan_allocator.h" +#include "asan_poisoning.h" #include "asan_thread.h" -#include "asan_thread_registry.h" -#include "sanitizer/asan_interface.h" namespace __asan { FakeStack::FakeStack() { - CHECK(REAL(memset) != 0); + CHECK(REAL(memset)); REAL(memset)(this, 0, sizeof(*this)); } @@ -31,24 +30,26 @@ bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) { } uptr FakeStack::AddrIsInFakeStack(uptr addr) { - for (uptr i = 0; i < kNumberOfSizeClasses; i++) { - if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i]; + for (uptr size_class = 0; size_class < kNumberOfSizeClasses; size_class++) { + if (!AddrIsInSizeClass(addr, size_class)) continue; + uptr size_class_first_ptr = allocated_size_classes_[size_class]; + uptr size = ClassSize(size_class); + CHECK_LE(size_class_first_ptr, addr); + CHECK_GT(size_class_first_ptr + ClassMmapSize(size_class), addr); + return size_class_first_ptr + ((addr - size_class_first_ptr) / size) * size; } return 0; } // We may want to compute this during compilation. -inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) { +ALWAYS_INLINE uptr FakeStack::ComputeSizeClass(uptr alloc_size) { uptr rounded_size = RoundUpToPowerOfTwo(alloc_size); uptr log = Log2(rounded_size); - CHECK(alloc_size <= (1UL << log)); - if (!(alloc_size > (1UL << (log-1)))) { - Printf("alloc_size %zu log %zu\n", alloc_size, log); - } - CHECK(alloc_size > (1UL << (log-1))); + CHECK_LE(alloc_size, (1UL << log)); + CHECK_GT(alloc_size, (1UL << (log-1))); uptr res = log < kMinStackFrameSizeLog ? 
0 : log - kMinStackFrameSizeLog; - CHECK(res < kNumberOfSizeClasses); - CHECK(ClassSize(res) >= rounded_size); + CHECK_LT(res, kNumberOfSizeClasses); + CHECK_GE(ClassSize(res), rounded_size); return res; } @@ -104,7 +105,7 @@ void FakeStack::AllocateOneSizeClass(uptr size_class) { uptr new_mem = (uptr)MmapOrDie( ClassMmapSize(size_class), __FUNCTION__); // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n", - // asanThreadRegistry().GetCurrent()->tid(), + // GetCurrentThread()->tid(), // size_class, new_mem, new_mem + ClassMmapSize(size_class), // ClassMmapSize(size_class)); uptr i; @@ -116,7 +117,7 @@ void FakeStack::AllocateOneSizeClass(uptr size_class) { allocated_size_classes_[size_class] = new_mem; } -uptr FakeStack::AllocateStack(uptr size, uptr real_stack) { +ALWAYS_INLINE uptr FakeStack::AllocateStack(uptr size, uptr real_stack) { if (!alive_) return real_stack; CHECK(size <= kMaxStackMallocSize && size > 1); uptr size_class = ComputeSizeClass(size); @@ -138,7 +139,7 @@ uptr FakeStack::AllocateStack(uptr size, uptr real_stack) { return ptr; } -void FakeStack::DeallocateFrame(FakeFrame *fake_frame) { +ALWAYS_INLINE void FakeStack::DeallocateFrame(FakeFrame *fake_frame) { CHECK(alive_); uptr size = fake_frame->size_minus_one + 1; uptr size_class = ComputeSizeClass(size); @@ -149,11 +150,11 @@ void FakeStack::DeallocateFrame(FakeFrame *fake_frame) { size_classes_[size_class].FifoPush(fake_frame); } -void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) { +ALWAYS_INLINE void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) { FakeFrame *fake_frame = (FakeFrame*)ptr; - CHECK(fake_frame->magic = kRetiredStackFrameMagic); - CHECK(fake_frame->descr != 0); - CHECK(fake_frame->size_minus_one == size - 1); + CHECK_EQ(fake_frame->magic, kRetiredStackFrameMagic); + CHECK_NE(fake_frame->descr, 0); + CHECK_EQ(fake_frame->size_minus_one, size - 1); PoisonShadow(ptr, size, kAsanStackAfterReturnMagic); } @@ -164,7 +165,7 @@ using namespace __asan; // NOLINT uptr __asan_stack_malloc(uptr size, uptr real_stack) { if (!flags()->use_fake_stack) return real_stack; - AsanThread *t = asanThreadRegistry().GetCurrent(); + AsanThread *t = GetCurrentThread(); if (!t) { // TSD is gone, use the real stack. return real_stack; diff --git a/lib/asan/asan_fake_stack.h b/lib/asan/asan_fake_stack.h new file mode 100644 index 000000000000..308b4c571832 --- /dev/null +++ b/lib/asan/asan_fake_stack.h @@ -0,0 +1,117 @@ +//===-- asan_fake_stack.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_fake_stack.cc +//===----------------------------------------------------------------------===// + +#ifndef ASAN_FAKE_STACK_H +#define ASAN_FAKE_STACK_H + +namespace __asan { + +// Fake stack frame contains local variables of one function. +struct FakeFrame { + uptr magic; // Modified by the instrumented code. + uptr descr; // Modified by the instrumented code. + uptr pc; // Modified by the instrumented code. + u64 real_stack : 48; + u64 size_minus_one : 16; + // End of the first 32 bytes. + // The rest should not be used when the frame is active. 
+ FakeFrame *next; +}; + +struct FakeFrameFifo { + public: + void FifoPush(FakeFrame *node); + FakeFrame *FifoPop(); + private: + FakeFrame *first_, *last_; +}; + +template<uptr kMaxNumberOfFrames> +class FakeFrameLifo { + public: + explicit FakeFrameLifo(LinkerInitialized) {} + FakeFrameLifo() : n_frames_(0) {} + void LifoPush(FakeFrame *node) { + CHECK_LT(n_frames_, kMaxNumberOfFrames); + frames_[n_frames_++] = node; + } + void LifoPop() { + CHECK(n_frames_); + n_frames_--; + } + FakeFrame *top() { + if (n_frames_ == 0) + return 0; + return frames_[n_frames_ - 1]; + } + private: + uptr n_frames_; + FakeFrame *frames_[kMaxNumberOfFrames]; +}; + +// For each thread we create a fake stack and place stack objects on this fake +// stack instead of the real stack. The fake stack is not really a stack but +// a fast malloc-like allocator so that when a function exits the fake stack +// is not poped but remains there for quite some time until gets used again. +// So, we poison the objects on the fake stack when function returns. +// It helps us find use-after-return bugs. +// We can not rely on __asan_stack_free being called on every function exit, +// so we maintain a lifo list of all current fake frames and update it on every +// call to __asan_stack_malloc. +class FakeStack { + public: + FakeStack(); + explicit FakeStack(LinkerInitialized x) : call_stack_(x) {} + void Init(uptr stack_size); + void StopUsingFakeStack() { alive_ = false; } + void Cleanup(); + uptr AllocateStack(uptr size, uptr real_stack); + static void OnFree(uptr ptr, uptr size, uptr real_stack); + // Return the bottom of the maped region. + uptr AddrIsInFakeStack(uptr addr); + bool StackSize() { return stack_size_; } + + private: + static const uptr kMinStackFrameSizeLog = 9; // Min frame is 512B. + static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K. + static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog; + static const uptr kNumberOfSizeClasses = + kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1; + static const uptr kMaxRecursionDepth = 1023; + + bool AddrIsInSizeClass(uptr addr, uptr size_class); + + // Each size class should be large enough to hold all frames. + uptr ClassMmapSize(uptr size_class); + + uptr ClassSize(uptr size_class) { + return 1UL << (size_class + kMinStackFrameSizeLog); + } + + void DeallocateFrame(FakeFrame *fake_frame); + + uptr ComputeSizeClass(uptr alloc_size); + void AllocateOneSizeClass(uptr size_class); + + uptr stack_size_; + bool alive_; + + uptr allocated_size_classes_[kNumberOfSizeClasses]; + FakeFrameFifo size_classes_[kNumberOfSizeClasses]; + FakeFrameLifo<kMaxRecursionDepth> call_stack_; +}; + +} // namespace __asan + +#endif // ASAN_FAKE_STACK_H diff --git a/lib/asan/asan_flags.h b/lib/asan/asan_flags.h index d7b21ea4a45f..2f3bc9051ae1 100644 --- a/lib/asan/asan_flags.h +++ b/lib/asan/asan_flags.h @@ -15,13 +15,15 @@ #ifndef ASAN_FLAGS_H #define ASAN_FLAGS_H -#include "sanitizer/common_interface_defs.h" +#include "sanitizer_common/sanitizer_internal_defs.h" -// ASan flag values can be defined in three ways: +// ASan flag values can be defined in four ways: // 1) initialized with default values at startup. -// 2) overriden from string returned by user-specified function +// 2) overriden during compilation of ASan runtime by providing +// compile definition ASAN_DEFAULT_OPTIONS. +// 3) overriden from string returned by user-specified function // __asan_default_options(). -// 3) overriden from env variable ASAN_OPTIONS. 
+// 4) overriden from env variable ASAN_OPTIONS. namespace __asan { @@ -30,8 +32,6 @@ struct Flags { // Lower value may reduce memory usage but increase the chance of // false negatives. int quarantine_size; - // If set, uses in-process symbolizer from common sanitizer runtime. - bool symbolize; // Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output). int verbosity; // Size (in bytes) of redzones around heap objects. @@ -45,22 +45,18 @@ struct Flags { int report_globals; // If set, attempts to catch initialization order issues. bool check_initialization_order; - // Max number of stack frames kept for each allocation/deallocation. - int malloc_context_size; // If set, uses custom wrappers and replacements for libc string functions // to find more errors. bool replace_str; // If set, uses custom wrappers for memset/memcpy/memmove intinsics. bool replace_intrin; - // Used on Mac only. See comments in asan_mac.cc and asan_malloc_mac.cc. - bool replace_cfallocator; // Used on Mac only. bool mac_ignore_invalid_free; - // ASan allocator flag. See asan_allocator.cc. + // ASan allocator flag. bool use_fake_stack; - // ASan allocator flag. Sets the maximal size of allocation request - // that would return memory filled with zero bytes. - int max_malloc_fill_size; + // ASan allocator flag. max_malloc_fill_size is the maximal amount of bytes + // that will be filled with malloc_fill_byte on malloc. + int max_malloc_fill_size, malloc_fill_byte; // Override exit status if something was reported. int exitcode; // If set, user may manually mark memory regions as poisoned or unpoisoned. @@ -71,6 +67,8 @@ struct Flags { int sleep_before_dying; // If set, registers ASan custom segv handler. bool handle_segv; + // If set, allows user register segv handler even if ASan registers one. + bool allow_user_segv_handler; // If set, uses alternate stack for signal handling. bool use_sigaltstack; // Allow the users to work around the bug in Nvidia drivers prior to 295.*. @@ -79,6 +77,10 @@ struct Flags { bool unmap_shadow_on_exit; // If set, calls abort() instead of _exit() after printing an error report. bool abort_on_error; + // Print various statistics after printing an error message or if atexit=1. + bool print_stats; + // Print the legend for the shadow bytes. + bool print_legend; // If set, prints ASan exit stats even after program terminates successfully. bool atexit; // By default, disable core dumper on 64-bit - it makes little sense @@ -87,18 +89,12 @@ struct Flags { // Allow the tool to re-exec the program. This may interfere badly with the // debugger. bool allow_reexec; - // Strips this prefix from file paths in error reports. - const char *strip_path_prefix; // If set, prints not only thread creation stacks for threads in error report, // but also thread creation stacks for threads that created those threads, // etc. up to main thread. bool print_full_thread_history; // ASan will write logs to "log_path.pid" instead of stderr. const char *log_path; - // Use fast (frame-pointer-based) unwinder on fatal errors (if available). - bool fast_unwind_on_fatal; - // Use fast (frame-pointer-based) unwinder on malloc/free (if available). - bool fast_unwind_on_malloc; // Poison (or not) the heap memory on [de]allocation. Zero value is useful // for benchmarking the allocator or instrumentator. bool poison_heap; @@ -106,9 +102,20 @@ struct Flags { bool alloc_dealloc_mismatch; // Use stack depot instead of storing stacks in the redzones. 
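To make the override order described at the top of this header concrete, a hedged sketch of the user-facing side, assuming the runtime option names match the field names in this struct:

// Compiled into the program: defaults supplied via __asan_default_options().
extern "C" const char *__asan_default_options() {
  return "quarantine_size=268435456:malloc_fill_byte=190";
}

At run time the same names can still be overridden through the environment, e.g. ASAN_OPTIONS=detect_leaks=1:strict_memcmp=0 set before launching the binary.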
bool use_stack_depot; + // If true, assume that memcmp(p1, p2, n) always reads n bytes before + // comparing p1 and p2. + bool strict_memcmp; + // If true, assume that dynamic initializers can never access globals from + // other modules, even if the latter are already initialized. + bool strict_init_order; + // Invoke LeakSanitizer at process exit. + bool detect_leaks; }; -Flags *flags(); +extern Flags asan_flags_dont_use_directly; +inline Flags *flags() { + return &asan_flags_dont_use_directly; +} void InitializeFlags(Flags *f, const char *env); } // namespace __asan diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc index 4e18bb8e2355..301ea44f2ca5 100644 --- a/lib/asan/asan_globals.cc +++ b/lib/asan/asan_globals.cc @@ -14,12 +14,14 @@ #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_mapping.h" +#include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" #include "asan_thread.h" -#include "sanitizer/asan_interface.h" +#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_placement_new.h" namespace __asan { @@ -33,38 +35,48 @@ struct ListOfGlobals { static BlockingMutex mu_for_globals(LINKER_INITIALIZED); static LowLevelAllocator allocator_for_globals; static ListOfGlobals *list_of_all_globals; -static ListOfGlobals *list_of_dynamic_init_globals; - -void PoisonRedZones(const Global &g) { - uptr shadow_rz_size = kGlobalAndStackRedzone >> SHADOW_SCALE; - CHECK(shadow_rz_size == 1 || shadow_rz_size == 2 || shadow_rz_size == 4); - // full right redzone - uptr g_aligned_size = kGlobalAndStackRedzone * - ((g.size + kGlobalAndStackRedzone - 1) / kGlobalAndStackRedzone); - PoisonShadow(g.beg + g_aligned_size, - kGlobalAndStackRedzone, kAsanGlobalRedzoneMagic); - if ((g.size % kGlobalAndStackRedzone) != 0) { - // partial right redzone - u64 g_aligned_down_size = kGlobalAndStackRedzone * - (g.size / kGlobalAndStackRedzone); - CHECK(g_aligned_down_size == g_aligned_size - kGlobalAndStackRedzone); - PoisonShadowPartialRightRedzone(g.beg + g_aligned_down_size, - g.size % kGlobalAndStackRedzone, - kGlobalAndStackRedzone, - kAsanGlobalRedzoneMagic); + +static const int kDynamicInitGlobalsInitialCapacity = 512; +struct DynInitGlobal { + Global g; + bool initialized; +}; +typedef InternalVector<DynInitGlobal> VectorOfGlobals; +// Lazy-initialized and never deleted. 
+static VectorOfGlobals *dynamic_init_globals; + +ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) { + FastPoisonShadow(g->beg, g->size_with_redzone, value); +} + +ALWAYS_INLINE void PoisonRedZones(const Global &g) { + uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY); + FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size, + kAsanGlobalRedzoneMagic); + if (g.size != aligned_size) { + FastPoisonShadowPartialRightRedzone( + g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY), + g.size % SHADOW_GRANULARITY, + SHADOW_GRANULARITY, + kAsanGlobalRedzoneMagic); } } -bool DescribeAddressIfGlobal(uptr addr) { +static void ReportGlobal(const Global &g, const char *prefix) { + Report("%s Global: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n", + prefix, (void*)g.beg, g.size, g.size_with_redzone, g.name, + g.module_name, g.has_dynamic_init); +} + +bool DescribeAddressIfGlobal(uptr addr, uptr size) { if (!flags()->report_globals) return false; BlockingMutexLock lock(&mu_for_globals); bool res = false; for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { const Global &g = *l->g; if (flags()->report_globals >= 2) - Report("Search Global: beg=%p size=%zu name=%s\n", - (void*)g.beg, g.size, (char*)g.name); - res |= DescribeAddressRelativeToGlobal(addr, g); + ReportGlobal(g, "Search"); + res |= DescribeAddressRelativeToGlobal(addr, size, g); } return res; } @@ -75,24 +87,26 @@ bool DescribeAddressIfGlobal(uptr addr) { static void RegisterGlobal(const Global *g) { CHECK(asan_inited); if (flags()->report_globals >= 2) - Report("Added Global: beg=%p size=%zu/%zu name=%s dyn.init=%zu\n", - (void*)g->beg, g->size, g->size_with_redzone, g->name, - g->has_dynamic_init); + ReportGlobal(*g, "Added"); CHECK(flags()->report_globals); CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - PoisonRedZones(*g); + if (flags()->poison_heap) + PoisonRedZones(*g); ListOfGlobals *l = (ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals)); l->g = g; l->next = list_of_all_globals; list_of_all_globals = l; if (g->has_dynamic_init) { - l = (ListOfGlobals*)allocator_for_globals.Allocate(sizeof(ListOfGlobals)); - l->g = g; - l->next = list_of_dynamic_init_globals; - list_of_dynamic_init_globals = l; + if (dynamic_init_globals == 0) { + void *mem = allocator_for_globals.Allocate(sizeof(VectorOfGlobals)); + dynamic_init_globals = new(mem) + VectorOfGlobals(kDynamicInitGlobalsInitialCapacity); + } + DynInitGlobal dyn_global = { *g, false }; + dynamic_init_globals->push_back(dyn_global); } } @@ -102,34 +116,26 @@ static void UnregisterGlobal(const Global *g) { CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - PoisonShadow(g->beg, g->size_with_redzone, 0); + if (flags()->poison_heap) + PoisonShadowForGlobal(g, 0); // We unpoison the shadow memory for the global but we do not remove it from // the list because that would require O(n^2) time with the current list // implementation. It might not be worth doing anyway. } -// Poison all shadow memory for a single global. 
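
A worked example (editorial, with assumed numbers) of the redzone poisoning in the new PoisonRedZones above, taking SHADOW_GRANULARITY == 8, g.size == 10 and g.size_with_redzone == 64:

    // aligned_size = RoundUpTo(10, 8) = 16
    // FastPoisonShadow(g.beg + 16, 64 - 16, kAsanGlobalRedzoneMagic)
    //   -> bytes [16, 64) of the global's region become the right redzone.
    // g.size != aligned_size, so the partially used granule is handled too:
    // FastPoisonShadowPartialRightRedzone(g.beg + RoundDownTo(10, 8),  // g.beg + 8
    //                                     10 % 8,                      // 2 live bytes
    //                                     8, kAsanGlobalRedzoneMagic)
    //   -> within the granule [8, 16), the first 2 bytes stay addressable and
    //      the remaining 6 are poisoned.
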
-static void PoisonGlobalAndRedzones(const Global *g) { - CHECK(asan_inited); - CHECK(flags()->check_initialization_order); - CHECK(AddrIsInMem(g->beg)); - CHECK(AddrIsAlignedByGranularity(g->beg)); - CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - if (flags()->report_globals >= 3) - Printf("DynInitPoison : %s\n", g->name); - PoisonShadow(g->beg, g->size_with_redzone, kAsanInitializationOrderMagic); -} - -static void UnpoisonGlobal(const Global *g) { - CHECK(asan_inited); - CHECK(flags()->check_initialization_order); - CHECK(AddrIsInMem(g->beg)); - CHECK(AddrIsAlignedByGranularity(g->beg)); - CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - if (flags()->report_globals >= 3) - Printf("DynInitUnpoison: %s\n", g->name); - PoisonShadow(g->beg, g->size_with_redzone, 0); - PoisonRedZones(*g); +void StopInitOrderChecking() { + BlockingMutexLock lock(&mu_for_globals); + if (!flags()->check_initialization_order || !dynamic_init_globals) + return; + flags()->check_initialization_order = false; + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + // Unpoison the whole global. + PoisonShadowForGlobal(g, 0); + // Poison redzones back. + PoisonRedZones(*g); + } } } // namespace __asan @@ -160,31 +166,47 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) { // when all dynamically initialized globals are unpoisoned. This method // poisons all global variables not defined in this TU, so that a dynamic // initializer can only touch global variables in the same TU. -void __asan_before_dynamic_init(uptr first_addr, uptr last_addr) { - if (!flags()->check_initialization_order) return; - CHECK(list_of_dynamic_init_globals); +void __asan_before_dynamic_init(const char *module_name) { + if (!flags()->check_initialization_order || + !flags()->poison_heap) + return; + bool strict_init_order = flags()->strict_init_order; + CHECK(dynamic_init_globals); + CHECK(module_name); + CHECK(asan_inited); BlockingMutexLock lock(&mu_for_globals); - bool from_current_tu = false; - // The list looks like: - // a => ... => b => last_addr => ... => first_addr => c => ... - // The globals of the current TU reside between last_addr and first_addr. - for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next) { - if (l->g->beg == last_addr) - from_current_tu = true; - if (!from_current_tu) - PoisonGlobalAndRedzones(l->g); - if (l->g->beg == first_addr) - from_current_tu = false; + if (flags()->report_globals >= 3) + Printf("DynInitPoison module: %s\n", module_name); + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + if (dyn_g.initialized) + continue; + if (g->module_name != module_name) + PoisonShadowForGlobal(g, kAsanInitializationOrderMagic); + else if (!strict_init_order) + dyn_g.initialized = true; } - CHECK(!from_current_tu); } // This method runs immediately after dynamic initialization in each TU, when // all dynamically initialized globals except for those defined in the current // TU are poisoned. It simply unpoisons all dynamically initialized globals. 
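
To make the poisoning scheme described above concrete, here is a two-file sketch (editorial, names invented) of the initialization-order bug that check_initialization_order is meant to expose. If main.cc's initializer happens to run first, global_in_other_tu is still poisoned and the read is reported; with strict_init_order=1 it stays poisoned while other modules initialize, so the read is reported regardless of the actual ordering.

    // other.cc (hypothetical)
    int ComputeValue() { return 5; }            // forces a dynamic initializer
    int global_in_other_tu = ComputeValue();

    // main.cc (hypothetical)
    extern int global_in_other_tu;
    static int copy = global_in_other_tu;       // reads another TU's global while
                                                // dynamic initializers are running
    int main() { return copy; }
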
void __asan_after_dynamic_init() { - if (!flags()->check_initialization_order) return; + if (!flags()->check_initialization_order || + !flags()->poison_heap) + return; + CHECK(asan_inited); BlockingMutexLock lock(&mu_for_globals); - for (ListOfGlobals *l = list_of_dynamic_init_globals; l; l = l->next) - UnpoisonGlobal(l->g); + // FIXME: Optionally report that we're unpoisoning globals from a module. + for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) { + DynInitGlobal &dyn_g = (*dynamic_init_globals)[i]; + const Global *g = &dyn_g.g; + if (!dyn_g.initialized) { + // Unpoison the whole global. + PoisonShadowForGlobal(g, 0); + // Poison redzones back. + PoisonRedZones(*g); + } + } } diff --git a/lib/asan/asan_intercepted_functions.h b/lib/asan/asan_intercepted_functions.h index a1faf713c130..842781cdb17f 100644 --- a/lib/asan/asan_intercepted_functions.h +++ b/lib/asan/asan_intercepted_functions.h @@ -19,16 +19,16 @@ #include "sanitizer_common/sanitizer_platform_interceptors.h" #include <stdarg.h> +#include <stddef.h> using __sanitizer::uptr; // Use macro to describe if specific function should be // intercepted on a given platform. -#if !defined(_WIN32) +#if !SANITIZER_WINDOWS # define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1 # define ASAN_INTERCEPT__LONGJMP 1 # define ASAN_INTERCEPT_STRDUP 1 -# define ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP 1 # define ASAN_INTERCEPT_INDEX 1 # define ASAN_INTERCEPT_PTHREAD_CREATE 1 # define ASAN_INTERCEPT_MLOCKX 1 @@ -36,225 +36,79 @@ using __sanitizer::uptr; # define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0 # define ASAN_INTERCEPT__LONGJMP 0 # define ASAN_INTERCEPT_STRDUP 0 -# define ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP 0 # define ASAN_INTERCEPT_INDEX 0 # define ASAN_INTERCEPT_PTHREAD_CREATE 0 # define ASAN_INTERCEPT_MLOCKX 0 #endif -#if defined(__linux__) +#if SANITIZER_LINUX # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1 #else # define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0 #endif -#if !defined(__APPLE__) +#if !SANITIZER_MAC # define ASAN_INTERCEPT_STRNLEN 1 #else # define ASAN_INTERCEPT_STRNLEN 0 #endif -#if defined(__linux__) && !defined(ANDROID) +#if SANITIZER_LINUX && !SANITIZER_ANDROID # define ASAN_INTERCEPT_SWAPCONTEXT 1 #else # define ASAN_INTERCEPT_SWAPCONTEXT 0 #endif -#if !defined(ANDROID) && !defined(_WIN32) +#if !SANITIZER_ANDROID && !SANITIZER_WINDOWS # define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1 #else # define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0 #endif -// On Darwin siglongjmp tailcalls longjmp, so we don't want to intercept it -// there. -#if !defined(_WIN32) && (!defined(__APPLE__) || MAC_INTERPOSE_FUNCTIONS) +#if !SANITIZER_WINDOWS # define ASAN_INTERCEPT_SIGLONGJMP 1 #else # define ASAN_INTERCEPT_SIGLONGJMP 0 #endif -#if ASAN_HAS_EXCEPTIONS && !defined(_WIN32) +#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS # define ASAN_INTERCEPT___CXA_THROW 1 #else # define ASAN_INTERCEPT___CXA_THROW 0 #endif -#define DECLARE_FUNCTION_AND_WRAPPER(ret_type, func, ...) \ - ret_type func(__VA_ARGS__); \ - ret_type WRAP(func)(__VA_ARGS__) - -// Use extern declarations of intercepted functions on Mac and Windows -// to avoid including system headers. 
-#if defined(__APPLE__) || (defined(_WIN32) && !defined(_DLL)) -extern "C" { -// signal.h -# if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION -struct sigaction; -DECLARE_FUNCTION_AND_WRAPPER(int, sigaction, int sig, - const struct sigaction *act, - struct sigaction *oldact); -DECLARE_FUNCTION_AND_WRAPPER(void*, signal, int signum, void *handler); -# endif - -// setjmp.h -DECLARE_FUNCTION_AND_WRAPPER(void, longjmp, void *env, int value); -# if ASAN_INTERCEPT__LONGJMP -DECLARE_FUNCTION_AND_WRAPPER(void, _longjmp, void *env, int value); -# endif -# if ASAN_INTERCEPT_SIGLONGJMP -DECLARE_FUNCTION_AND_WRAPPER(void, siglongjmp, void *env, int value); -# endif -# if ASAN_INTERCEPT___CXA_THROW -DECLARE_FUNCTION_AND_WRAPPER(void, __cxa_throw, void *a, void *b, void *c); -#endif - -// string.h / strings.h -DECLARE_FUNCTION_AND_WRAPPER(int, memcmp, - const void *a1, const void *a2, uptr size); -DECLARE_FUNCTION_AND_WRAPPER(void*, memmove, - void *to, const void *from, uptr size); -DECLARE_FUNCTION_AND_WRAPPER(void*, memcpy, - void *to, const void *from, uptr size); -DECLARE_FUNCTION_AND_WRAPPER(void*, memset, void *block, int c, uptr size); -DECLARE_FUNCTION_AND_WRAPPER(char*, strchr, const char *str, int c); -DECLARE_FUNCTION_AND_WRAPPER(char*, strcat, /* NOLINT */ - char *to, const char* from); -DECLARE_FUNCTION_AND_WRAPPER(char*, strncat, - char *to, const char* from, uptr size); -DECLARE_FUNCTION_AND_WRAPPER(char*, strcpy, /* NOLINT */ - char *to, const char* from); -DECLARE_FUNCTION_AND_WRAPPER(char*, strncpy, - char *to, const char* from, uptr size); -DECLARE_FUNCTION_AND_WRAPPER(int, strcmp, const char *s1, const char* s2); -DECLARE_FUNCTION_AND_WRAPPER(int, strncmp, - const char *s1, const char* s2, uptr size); -DECLARE_FUNCTION_AND_WRAPPER(uptr, strlen, const char *s); -# if ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP -DECLARE_FUNCTION_AND_WRAPPER(int, strcasecmp, const char *s1, const char *s2); -DECLARE_FUNCTION_AND_WRAPPER(int, strncasecmp, - const char *s1, const char *s2, uptr n); -# endif -# if ASAN_INTERCEPT_STRDUP -DECLARE_FUNCTION_AND_WRAPPER(char*, strdup, const char *s); -# endif -# if ASAN_INTERCEPT_STRNLEN -DECLARE_FUNCTION_AND_WRAPPER(uptr, strnlen, const char *s, uptr maxlen); -# endif -#if ASAN_INTERCEPT_INDEX -DECLARE_FUNCTION_AND_WRAPPER(char*, index, const char *string, int c); -#endif - -// stdlib.h -DECLARE_FUNCTION_AND_WRAPPER(int, atoi, const char *nptr); -DECLARE_FUNCTION_AND_WRAPPER(long, atol, const char *nptr); // NOLINT -DECLARE_FUNCTION_AND_WRAPPER(long, strtol, const char *nptr, char **endptr, int base); // NOLINT -# if ASAN_INTERCEPT_ATOLL_AND_STRTOLL -DECLARE_FUNCTION_AND_WRAPPER(long long, atoll, const char *nptr); // NOLINT -DECLARE_FUNCTION_AND_WRAPPER(long long, strtoll, const char *nptr, char **endptr, int base); // NOLINT -# endif - -// unistd.h -# if SANITIZER_INTERCEPT_READ -DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, read, int fd, void *buf, SIZE_T count); -# endif -# if SANITIZER_INTERCEPT_PREAD -DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread, int fd, void *buf, - SIZE_T count, OFF_T offset); -# endif -# if SANITIZER_INTERCEPT_PREAD64 -DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pread64, int fd, void *buf, - SIZE_T count, OFF64_T offset); -# endif - -#if SANITIZER_INTERCEPT_WRITE -DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, write, int fd, void *ptr, SIZE_T count); -#endif -#if SANITIZER_INTERCEPT_PWRITE -DECLARE_FUNCTION_AND_WRAPPER(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count); +#if !SANITIZER_WINDOWS +# define ASAN_INTERCEPT___CXA_ATEXIT 1 +#else +# define 
ASAN_INTERCEPT___CXA_ATEXIT 0 #endif -# if ASAN_INTERCEPT_MLOCKX -// mlock/munlock -DECLARE_FUNCTION_AND_WRAPPER(int, mlock, const void *addr, SIZE_T len); -DECLARE_FUNCTION_AND_WRAPPER(int, munlock, const void *addr, SIZE_T len); -DECLARE_FUNCTION_AND_WRAPPER(int, mlockall, int flags); -DECLARE_FUNCTION_AND_WRAPPER(int, munlockall, void); -# endif - +# if SANITIZER_WINDOWS +extern "C" { // Windows threads. -# if defined(_WIN32) __declspec(dllimport) void* __stdcall CreateThread(void *sec, uptr st, void* start, void *arg, DWORD fl, DWORD *id); -# endif -// Posix threads. -# if ASAN_INTERCEPT_PTHREAD_CREATE -DECLARE_FUNCTION_AND_WRAPPER(int, pthread_create, - void *thread, void *attr, - void *(*start_routine)(void*), void *arg); -# endif - -#if defined(__APPLE__) -typedef void* pthread_workqueue_t; -typedef void* pthread_workitem_handle_t; - -typedef void* dispatch_group_t; -typedef void* dispatch_queue_t; -typedef void* dispatch_source_t; -typedef u64 dispatch_time_t; -typedef void (*dispatch_function_t)(void *block); -typedef void* (*worker_t)(void *block); -typedef void* CFStringRef; -typedef void* CFAllocatorRef; - -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_async_f, - dispatch_queue_t dq, - void *ctxt, dispatch_function_t func); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_sync_f, - dispatch_queue_t dq, - void *ctxt, dispatch_function_t func); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_after_f, - dispatch_time_t when, dispatch_queue_t dq, - void *ctxt, dispatch_function_t func); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_barrier_async_f, - dispatch_queue_t dq, - void *ctxt, dispatch_function_t func); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async_f, - dispatch_group_t group, dispatch_queue_t dq, - void *ctxt, dispatch_function_t func); -DECLARE_FUNCTION_AND_WRAPPER(void, __CFInitialize, void); -DECLARE_FUNCTION_AND_WRAPPER(CFStringRef, CFStringCreateCopy, - CFAllocatorRef alloc, CFStringRef str); -DECLARE_FUNCTION_AND_WRAPPER(void, free, void* ptr); - -DECLARE_FUNCTION_AND_WRAPPER(int, vscanf, const char *format, va_list ap); -DECLARE_FUNCTION_AND_WRAPPER(int, vsscanf, const char *str, const char *format, - va_list ap); -DECLARE_FUNCTION_AND_WRAPPER(int, vfscanf, void *stream, const char *format, - va_list ap); -DECLARE_FUNCTION_AND_WRAPPER(int, scanf, const char *format, ...); -DECLARE_FUNCTION_AND_WRAPPER(int, fscanf, - void* stream, const char *format, ...); -DECLARE_FUNCTION_AND_WRAPPER(int, sscanf, // NOLINT - const char *str, const char *format, ...); - -#if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT) -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_group_async, - dispatch_group_t dg, - dispatch_queue_t dq, void (^work)(void)); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_async, - dispatch_queue_t dq, void (^work)(void)); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_after, - dispatch_queue_t dq, void (^work)(void)); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_event_handler, - dispatch_source_t ds, void (^work)(void)); -DECLARE_FUNCTION_AND_WRAPPER(void, dispatch_source_set_cancel_handler, - dispatch_source_t ds, void (^work)(void)); -#endif // MAC_INTERPOSE_FUNCTIONS -#endif // __APPLE__ -} // extern "C" -#endif +int memcmp(const void *a1, const void *a2, uptr size); +void memmove(void *to, const void *from, uptr size); +void* memset(void *block, int c, uptr size); +void* memcpy(void *to, const void *from, uptr size); +char* strcat(char *to, const char* from); // NOLINT +char* strchr(const char *str, int c); +int strcmp(const char *s1, const 
char* s2); +char* strcpy(char *to, const char* from); // NOLINT +uptr strlen(const char *s); +char* strncat(char *to, const char* from, uptr size); +int strncmp(const char *s1, const char* s2, uptr size); +char* strncpy(char *to, const char* from, uptr size); +uptr strnlen(const char *s, uptr maxlen); +int atoi(const char *nptr); +long atol(const char *nptr); // NOLINT +long strtol(const char *nptr, char **endptr, int base); // NOLINT +void longjmp(void *env, int value); +double frexp(double x, int *expptr); +} +# endif #endif // ASAN_INTERCEPTED_FUNCTIONS_H diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc index 6170974d6f5e..7e7deea29634 100644 --- a/lib/asan/asan_interceptors.cc +++ b/lib/asan/asan_interceptors.cc @@ -17,27 +17,40 @@ #include "asan_intercepted_functions.h" #include "asan_internal.h" #include "asan_mapping.h" +#include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" -#include "asan_thread_registry.h" #include "interception/interception.h" -#include "sanitizer/asan_interface.h" #include "sanitizer_common/sanitizer_libc.h" namespace __asan { +// Return true if we can quickly decide that the region is unpoisoned. +static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) { + if (size == 0) return true; + if (size <= 32) + return !AddressIsPoisoned(beg) && + !AddressIsPoisoned(beg + size - 1) && + !AddressIsPoisoned(beg + size / 2); + return false; +} + // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE, // and ASAN_WRITE_RANGE as macro instead of function so // that no extra frames are created, and stack trace contains // relevant information only. // We check all shadow bytes. -#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \ - if (uptr __ptr = __asan_region_is_poisoned((uptr)(offset), size)) { \ - GET_CURRENT_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, __ptr, isWrite, /* access_size */1); \ - } \ -} while (0) +#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \ + uptr __offset = (uptr)(offset); \ + uptr __size = (uptr)(size); \ + uptr __bad = 0; \ + if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \ + (__bad = __asan_region_is_poisoned(__offset, __size))) { \ + GET_CURRENT_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size); \ + } \ + } while (0) #define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false) #define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true); @@ -76,9 +89,14 @@ static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { } void SetThreadName(const char *name) { - AsanThread *t = asanThreadRegistry().GetCurrent(); + AsanThread *t = GetCurrentThread(); if (t) - t->summary()->set_name(name); + asanThreadRegistry().SetThreadName(t->tid(), name); +} + +static void DisableStrictInitOrderChecker() { + if (flags()->strict_init_order) + flags()->check_initialization_order = false; } } // namespace __asan @@ -89,37 +107,54 @@ using namespace __asan; // NOLINT #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ ASAN_WRITE_RANGE(ptr, size) #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size) -#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ - do { \ - ctx = 0; \ - (void)ctx; \ - ENSURE_ASAN_INITED(); \ +#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) 
\ + do { \ + if (asan_init_is_running) \ + return REAL(func)(__VA_ARGS__); \ + ctx = 0; \ + (void)ctx; \ + ENSURE_ASAN_INITED(); \ } while (false) #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) do { } while (false) #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) do { } while (false) #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name) #include "sanitizer_common/sanitizer_common_interceptors.inc" +#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s) +#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s) +#define COMMON_SYSCALL_POST_READ_RANGE(p, s) +#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) +#include "sanitizer_common/sanitizer_common_syscalls.inc" + static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { AsanThread *t = (AsanThread*)arg; - asanThreadRegistry().SetCurrent(t); - return t->ThreadStart(); + SetCurrentThread(t); + return t->ThreadStart(GetTid()); } #if ASAN_INTERCEPT_PTHREAD_CREATE +extern "C" int pthread_attr_getdetachstate(void *attr, int *v); + INTERCEPTOR(int, pthread_create, void *thread, void *attr, void *(*start_routine)(void*), void *arg) { + // Strict init-order checking in thread-hostile. + DisableStrictInitOrderChecker(); GET_STACK_TRACE_THREAD; - u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); - AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack); - asanThreadRegistry().RegisterThread(t); + int detached = 0; + if (attr != 0) + pthread_attr_getdetachstate(attr, &detached); + + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = AsanThread::Create(start_routine, arg); + CreateThreadContextArgs args = { t, &stack }; + asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args); return REAL(pthread_create)(thread, attr, asan_thread_start, t); } #endif // ASAN_INTERCEPT_PTHREAD_CREATE #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION INTERCEPTOR(void*, signal, int signum, void *handler) { - if (!AsanInterceptsSignal(signum)) { + if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) { return REAL(signal)(signum, handler); } return 0; @@ -127,12 +162,12 @@ INTERCEPTOR(void*, signal, int signum, void *handler) { INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact) { - if (!AsanInterceptsSignal(signum)) { + if (!AsanInterceptsSignal(signum) || flags()->allow_user_segv_handler) { return REAL(sigaction)(signum, act, oldact); } return 0; } -#elif ASAN_POSIX +#elif SANITIZER_POSIX // We need to have defined REAL(sigaction) on posix systems. DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact); @@ -237,27 +272,32 @@ static inline int CharCmp(unsigned char c1, unsigned char c2) { return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1; } -static inline int CharCaseCmp(unsigned char c1, unsigned char c2) { - int c1_low = ToLower(c1); - int c2_low = ToLower(c2); - return c1_low - c2_low; -} - INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) { if (!asan_inited) return internal_memcmp(a1, a2, size); ENSURE_ASAN_INITED(); - unsigned char c1 = 0, c2 = 0; - const unsigned char *s1 = (const unsigned char*)a1; - const unsigned char *s2 = (const unsigned char*)a2; - uptr i; - for (i = 0; i < size; i++) { - c1 = s1[i]; - c2 = s2[i]; - if (c1 != c2) break; + if (flags()->replace_intrin) { + if (flags()->strict_memcmp) { + // Check the entire regions even if the first bytes of the buffers are + // different. 
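
An editorial illustration of the strict_memcmp distinction implemented in this interceptor (buffer sizes and contents invented):

    #include <cstring>
    #include <cstdlib>

    int main() {
      char *p = (char *)malloc(8);        // only 8 addressable bytes
      p[0] = 'x';
      char q[16] = "yyyyyyyyyyyyyyy";
      // Comparing 16 bytes:
      //  strict_memcmp=1: both full 16-byte ranges are checked up front, so the
      //                   8-byte heap buffer triggers a heap-buffer-overflow report;
      //  strict_memcmp=0: the byte-wise scan stops at the first difference
      //                   (index 0), so only one byte of each buffer is checked
      //                   and no report is produced.
      int r = memcmp(p, q, 16);
      free(p);
      return r ? 1 : 0;
    }
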
+ ASAN_READ_RANGE(a1, size); + ASAN_READ_RANGE(a2, size); + // Fallthrough to REAL(memcmp) below. + } else { + unsigned char c1 = 0, c2 = 0; + const unsigned char *s1 = (const unsigned char*)a1; + const unsigned char *s2 = (const unsigned char*)a2; + uptr i; + for (i = 0; i < size; i++) { + c1 = s1[i]; + c2 = s2[i]; + if (c1 != c2) break; + } + ASAN_READ_RANGE(s1, Min(i + 1, size)); + ASAN_READ_RANGE(s2, Min(i + 1, size)); + return CharCmp(c1, c2); + } } - ASAN_READ_RANGE(s1, Min(i + 1, size)); - ASAN_READ_RANGE(s2, Min(i + 1, size)); - return CharCmp(c1, c2); + return REAL(memcmp(a1, a2, size)); } INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) { @@ -277,13 +317,9 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) { ASAN_READ_RANGE(from, size); ASAN_WRITE_RANGE(to, size); } -#if MAC_INTERPOSE_FUNCTIONS // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8. // See also http://code.google.com/p/address-sanitizer/issues/detail?id=116. return internal_memcpy(to, from, size); -#else - return REAL(memcpy)(to, from, size); -#endif } INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) { @@ -296,13 +332,9 @@ INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) { ASAN_READ_RANGE(from, size); ASAN_WRITE_RANGE(to, size); } -#if MAC_INTERPOSE_FUNCTIONS // Interposing of resolver functions is broken on Mac OS 10.7 and 10.8. // See also http://code.google.com/p/address-sanitizer/issues/detail?id=116. return internal_memmove(to, from, size); -#else - return REAL(memmove)(to, from, size); -#endif } INTERCEPTOR(void*, memset, void *block, int c, uptr size) { @@ -339,7 +371,12 @@ INTERCEPTOR(char*, strchr, const char *str, int c) { INTERCEPTOR(char*, index, const char *string, int c) ALIAS(WRAPPER_NAME(strchr)); # else -DEFINE_REAL(char*, index, const char *string, int c) +# if SANITIZER_MAC +DECLARE_REAL(char*, index, const char *string, int c) +OVERRIDE_FUNCTION(index, strchr); +# else +DEFINE_REAL(char*, index, const char *string, int c); +# endif # endif #endif // ASAN_INTERCEPT_INDEX @@ -400,7 +437,7 @@ INTERCEPTOR(int, strcmp, const char *s1, const char *s2) { } INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT -#if MAC_INTERPOSE_FUNCTIONS +#if SANITIZER_MAC if (!asan_inited) return REAL(strcpy)(to, from); // NOLINT #endif // strcpy is called from malloc_default_purgeable_zone() @@ -420,7 +457,7 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT #if ASAN_INTERCEPT_STRDUP INTERCEPTOR(char*, strdup, const char *s) { -#if MAC_INTERPOSE_FUNCTIONS +#if SANITIZER_MAC // FIXME: because internal_strdup() uses InternalAlloc(), which currently // just calls malloc() on Mac, we can't use internal_strdup() with the // dynamic runtime. 
We can remove the call to REAL(strdup) once InternalAlloc @@ -453,36 +490,6 @@ INTERCEPTOR(uptr, strlen, const char *s) { return length; } -#if ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP -INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) { - ENSURE_ASAN_INITED(); - unsigned char c1, c2; - uptr i; - for (i = 0; ; i++) { - c1 = (unsigned char)s1[i]; - c2 = (unsigned char)s2[i]; - if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break; - } - ASAN_READ_RANGE(s1, i + 1); - ASAN_READ_RANGE(s2, i + 1); - return CharCaseCmp(c1, c2); -} - -INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, uptr n) { - ENSURE_ASAN_INITED(); - unsigned char c1 = 0, c2 = 0; - uptr i; - for (i = 0; i < n; i++) { - c1 = (unsigned char)s1[i]; - c2 = (unsigned char)s2[i]; - if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break; - } - ASAN_READ_RANGE(s1, Min(i + 1, n)); - ASAN_READ_RANGE(s2, Min(i + 1, n)); - return CharCaseCmp(c1, c2); -} -#endif // ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP - INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) { if (!asan_inited) return internal_strncmp(s1, s2, size); // strncmp is called from malloc_default_purgeable_zone() @@ -530,7 +537,7 @@ static inline bool IsValidStrtolBase(int base) { } static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) { - CHECK(endptr != 0); + CHECK(endptr); if (nptr == *endptr) { // No digits were found at strtol call, we need to find out the last // symbol accessed by strtoll on our own. @@ -561,7 +568,7 @@ INTERCEPTOR(long, strtol, const char *nptr, // NOLINT } INTERCEPTOR(int, atoi, const char *nptr) { -#if MAC_INTERPOSE_FUNCTIONS +#if SANITIZER_MAC if (!asan_inited) return REAL(atoi)(nptr); #endif ENSURE_ASAN_INITED(); @@ -580,7 +587,7 @@ INTERCEPTOR(int, atoi, const char *nptr) { } INTERCEPTOR(long, atol, const char *nptr) { // NOLINT -#if MAC_INTERPOSE_FUNCTIONS +#if SANITIZER_MAC if (!asan_inited) return REAL(atol)(nptr); #endif ENSURE_ASAN_INITED(); @@ -629,20 +636,39 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT } #endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL +static void AtCxaAtexit(void *unused) { + (void)unused; + StopInitOrderChecking(); +} + +#if ASAN_INTERCEPT___CXA_ATEXIT +INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, + void *dso_handle) { + ENSURE_ASAN_INITED(); + int res = REAL(__cxa_atexit)(func, arg, dso_handle); + REAL(__cxa_atexit)(AtCxaAtexit, 0, 0); + return res; +} +#endif // ASAN_INTERCEPT___CXA_ATEXIT + #define ASAN_INTERCEPT_FUNC(name) do { \ if (!INTERCEPT_FUNCTION(name) && flags()->verbosity > 0) \ Report("AddressSanitizer: failed to intercept '" #name "'\n"); \ } while (0) -#if defined(_WIN32) +#if SANITIZER_WINDOWS INTERCEPTOR_WINAPI(DWORD, CreateThread, void* security, uptr stack_size, DWORD (__stdcall *start_routine)(void*), void* arg, DWORD flags, void* tid) { + // Strict init-order checking in thread-hostile. + DisableStrictInitOrderChecker(); GET_STACK_TRACE_THREAD; - u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); - AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack); - asanThreadRegistry().RegisterThread(t); + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = AsanThread::Create(start_routine, arg); + CreateThreadContextArgs args = { t, &stack }; + int detached = 0; // FIXME: how can we determine it on Windows? 
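
A small sketch (editorial, names invented) of why both thread-creation interceptors call DisableStrictInitOrderChecker(): a dynamic initializer that spawns a thread breaks the assumption that only one module is initializing at a time, so when strict_init_order is set the runtime switches init-order checking off as soon as it sees a thread being created.

    #include <pthread.h>

    static void *Worker(void *) { return nullptr; }

    static int SpawnDuringInit() {
      pthread_t t;
      pthread_create(&t, nullptr, Worker, nullptr);  // created during dynamic init
      pthread_join(t, nullptr);
      return 0;
    }

    // Dynamic initializer that makes strict init-order checking unsound.
    int g_spawned = SpawnDuringInit();

    int main() { return 0; }
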
+ asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args); return REAL(CreateThread)(security, stack_size, asan_thread_start, t, flags, tid); } @@ -661,10 +687,9 @@ void InitializeAsanInterceptors() { static bool was_called_once; CHECK(was_called_once == false); was_called_once = true; -#if MAC_INTERPOSE_FUNCTIONS +#if SANITIZER_MAC return; -#endif - +#else SANITIZER_COMMON_INTERCEPTORS_INIT; // Intercept mem* functions. @@ -673,12 +698,6 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(memset); if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { ASAN_INTERCEPT_FUNC(memcpy); - } else { -#if !MAC_INTERPOSE_FUNCTIONS - // If we're using dynamic interceptors on Mac, these two are just plain - // functions. - internal_memcpy(&REAL(memcpy), &REAL(memmove), sizeof(REAL(memmove))); -#endif } // Intercept str* functions. @@ -690,22 +709,14 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(strncat); ASAN_INTERCEPT_FUNC(strncmp); ASAN_INTERCEPT_FUNC(strncpy); -#if ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP - ASAN_INTERCEPT_FUNC(strcasecmp); - ASAN_INTERCEPT_FUNC(strncasecmp); -#endif #if ASAN_INTERCEPT_STRDUP ASAN_INTERCEPT_FUNC(strdup); #endif #if ASAN_INTERCEPT_STRNLEN ASAN_INTERCEPT_FUNC(strnlen); #endif -#if ASAN_INTERCEPT_INDEX -# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX +#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX ASAN_INTERCEPT_FUNC(index); -# else - CHECK(OVERRIDE_FUNCTION(index, WRAP(strchr))); -# endif #endif ASAN_INTERCEPT_FUNC(atoi); @@ -750,19 +761,20 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(pthread_create); #endif - // Some Windows-specific interceptors. -#if defined(_WIN32) - InitializeWindowsInterceptors(); + // Intercept atexit function. +#if ASAN_INTERCEPT___CXA_ATEXIT + ASAN_INTERCEPT_FUNC(__cxa_atexit); #endif - // Some Mac-specific interceptors. -#if defined(__APPLE__) - InitializeMacInterceptors(); + // Some Windows-specific interceptors. +#if SANITIZER_WINDOWS + InitializeWindowsInterceptors(); #endif if (flags()->verbosity > 0) { Report("AddressSanitizer: libc interceptors initialized\n"); } +#endif // SANITIZER_MAC } } // namespace __asan diff --git a/lib/asan/asan_interceptors.h b/lib/asan/asan_interceptors.h index 3b3e90ef93ff..91830aa145a9 100644 --- a/lib/asan/asan_interceptors.h +++ b/lib/asan/asan_interceptors.h @@ -32,9 +32,6 @@ DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act, namespace __asan { void InitializeAsanInterceptors(); -#if defined(__APPLE__) -void InitializeMacInterceptors(); -#endif // __APPLE__ } // namespace __asan diff --git a/lib/asan/asan_interface_internal.h b/lib/asan/asan_interface_internal.h new file mode 100644 index 000000000000..24f76253bccd --- /dev/null +++ b/lib/asan/asan_interface_internal.h @@ -0,0 +1,141 @@ +//===-- asan_interface_internal.h -------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// This header can be included by the instrumented program to fetch +// data (mostly allocator statistics) from ASan runtime library. 
+//===----------------------------------------------------------------------===// +#ifndef ASAN_INTERFACE_INTERNAL_H +#define ASAN_INTERFACE_INTERNAL_H + +#include "sanitizer_common/sanitizer_internal_defs.h" + +using __sanitizer::uptr; + +extern "C" { + // This function should be called at the very beginning of the process, + // before any instrumented code is executed and before any call to malloc. + // Everytime the asan ABI changes we also change the version number in this + // name. Objects build with incompatible asan ABI version + // will not link with run-time. + // Changes between ABI versions: + // v1=>v2: added 'module_name' to __asan_global + // v2=>v3: stack frame description (created by the compiler) + // contains the function PC as the 3-rd field (see + // DescribeAddressIfStack). + void __asan_init_v3() SANITIZER_INTERFACE_ATTRIBUTE; + #define __asan_init __asan_init_v3 + + // This structure describes an instrumented global variable. + struct __asan_global { + uptr beg; // The address of the global. + uptr size; // The original size of the global. + uptr size_with_redzone; // The size with the redzone. + const char *name; // Name as a C string. + const char *module_name; // Module name as a C string. This pointer is a + // unique identifier of a module. + uptr has_dynamic_init; // Non-zero if the global has dynamic initializer. + }; + + // These two functions should be called by the instrumented code. + // 'globals' is an array of structures describing 'n' globals. + void __asan_register_globals(__asan_global *globals, uptr n) + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_unregister_globals(__asan_global *globals, uptr n) + SANITIZER_INTERFACE_ATTRIBUTE; + + // These two functions should be called before and after dynamic initializers + // of a single module run, respectively. + void __asan_before_dynamic_init(const char *module_name) + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_after_dynamic_init() + SANITIZER_INTERFACE_ATTRIBUTE; + + // These two functions are used by the instrumented code in the + // use-after-return mode. __asan_stack_malloc allocates size bytes of + // fake stack and __asan_stack_free poisons it. real_stack is a pointer to + // the real stack region. + uptr __asan_stack_malloc(uptr size, uptr real_stack) + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) + SANITIZER_INTERFACE_ATTRIBUTE; + + // These two functions are used by instrumented code in the + // use-after-scope mode. They mark memory for local variables as + // unaddressable when they leave scope and addressable before the + // function exits. + void __asan_poison_stack_memory(uptr addr, uptr size) + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_unpoison_stack_memory(uptr addr, uptr size) + SANITIZER_INTERFACE_ATTRIBUTE; + + // Performs cleanup before a NoReturn function. Must be called before things + // like _exit and execl to avoid false positives on stack. 
+ void __asan_handle_no_return() SANITIZER_INTERFACE_ATTRIBUTE; + + void __asan_poison_memory_region(void const volatile *addr, uptr size) + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_unpoison_memory_region(void const volatile *addr, uptr size) + SANITIZER_INTERFACE_ATTRIBUTE; + + bool __asan_address_is_poisoned(void const volatile *addr) + SANITIZER_INTERFACE_ATTRIBUTE; + + uptr __asan_region_is_poisoned(uptr beg, uptr size) + SANITIZER_INTERFACE_ATTRIBUTE; + + void __asan_describe_address(uptr addr) + SANITIZER_INTERFACE_ATTRIBUTE; + + void __asan_report_error(uptr pc, uptr bp, uptr sp, + uptr addr, bool is_write, uptr access_size) + SANITIZER_INTERFACE_ATTRIBUTE; + + int __asan_set_error_exit_code(int exit_code) + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_set_death_callback(void (*callback)(void)) + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_set_error_report_callback(void (*callback)(const char*)) + SANITIZER_INTERFACE_ATTRIBUTE; + + /* OPTIONAL */ void __asan_on_error() + SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + + /* OPTIONAL */ bool __asan_symbolize(const void *pc, char *out_buffer, + int out_size) + SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + + uptr __asan_get_estimated_allocated_size(uptr size) + SANITIZER_INTERFACE_ATTRIBUTE; + bool __asan_get_ownership(const void *p) + SANITIZER_INTERFACE_ATTRIBUTE; + uptr __asan_get_allocated_size(const void *p) + SANITIZER_INTERFACE_ATTRIBUTE; + uptr __asan_get_current_allocated_bytes() + SANITIZER_INTERFACE_ATTRIBUTE; + uptr __asan_get_heap_size() + SANITIZER_INTERFACE_ATTRIBUTE; + uptr __asan_get_free_bytes() + SANITIZER_INTERFACE_ATTRIBUTE; + uptr __asan_get_unmapped_bytes() + SANITIZER_INTERFACE_ATTRIBUTE; + void __asan_print_accumulated_stats() + SANITIZER_INTERFACE_ATTRIBUTE; + + /* OPTIONAL */ const char* __asan_default_options() + SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + + /* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size) + SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + /* OPTIONAL */ void __asan_free_hook(void *ptr) + SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; +} // extern "C" + +#endif // ASAN_INTERFACE_INTERNAL_H diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h index 5d3bffa814da..7a4d74472bcd 100644 --- a/lib/asan/asan_internal.h +++ b/lib/asan/asan_internal.h @@ -15,45 +15,15 @@ #define ASAN_INTERNAL_H #include "asan_flags.h" +#include "asan_interface_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_libc.h" -#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32) -# error "This operating system is not supported by AddressSanitizer" -#endif - #define ASAN_DEFAULT_FAILURE_EXITCODE 1 -#if defined(__linux__) -# define ASAN_LINUX 1 -#else -# define ASAN_LINUX 0 -#endif - -#if defined(__APPLE__) -# define ASAN_MAC 1 -#else -# define ASAN_MAC 0 -#endif - -#if defined(_WIN32) -# define ASAN_WINDOWS 1 -#else -# define ASAN_WINDOWS 0 -#endif - -#if defined(__ANDROID__) || defined(ANDROID) -# define ASAN_ANDROID 1 -#else -# define ASAN_ANDROID 0 -#endif - - -#define ASAN_POSIX (ASAN_LINUX || ASAN_MAC) - -#if __has_feature(address_sanitizer) +#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) # error "The AddressSanitizer run-time should not be" " instrumented by AddressSanitizer" #endif @@ -62,7 +32,7 @@ // If set, asan will install its own SEGV signal handler. 
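
A minimal usage sketch (editorial, not part of the patch) of the manual poisoning entry points declared above, as a custom allocator might use them to keep its own redzones off-limits; in application code these are normally reached through the public sanitizer interface header, and the program links only when the ASan runtime is present.

    #include <cstdlib>
    #include <cstddef>

    extern "C" {
    // size_t stands in for the runtime's uptr in this sketch.
    void __asan_poison_memory_region(void const volatile *addr, size_t size);
    void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
    bool __asan_address_is_poisoned(void const volatile *addr);
    }

    int main() {
      char *buf = (char *)malloc(64);
      __asan_poison_memory_region(buf + 32, 32);             // make the tail off-limits
      bool tail_poisoned = __asan_address_is_poisoned(buf + 40);  // true under ASan
      __asan_unpoison_memory_region(buf + 32, 32);           // make it usable again
      free(buf);
      return tail_poisoned ? 0 : 1;
    }
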
#ifndef ASAN_NEEDS_SEGV -# if ASAN_ANDROID == 1 +# if SANITIZER_ANDROID == 1 # define ASAN_NEEDS_SEGV 0 # else # define ASAN_NEEDS_SEGV 1 @@ -90,6 +60,10 @@ # endif #endif +#ifndef ASAN_USE_PREINIT_ARRAY +# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID) +#endif + // All internal functions in asan reside inside the __asan namespace // to avoid namespace collisions with the user programs. // Seperate namespace also makes it simpler to distinguish the asan run-time @@ -118,6 +92,7 @@ void UnsetAlternateSignalStack(); void InstallSignalHandlers(); void ReadContextStack(void *context, uptr *stack, uptr *ssize); void AsanPlatformThreadInit(); +void StopInitOrderChecking(); // Wrapper for TLS/TSD. void AsanTSDInit(void (*destructor)(void *tsd)); @@ -126,24 +101,14 @@ void AsanTSDSet(void *tsd); void AppendToErrorMessageBuffer(const char *buffer); -// asan_poisoning.cc -// Poisons the shadow memory for "size" bytes starting from "addr". -void PoisonShadow(uptr addr, uptr size, u8 value); -// Poisons the shadow memory for "redzone_size" bytes starting from -// "addr + size". -void PoisonShadowPartialRightRedzone(uptr addr, - uptr size, - uptr redzone_size, - u8 value); - // Platfrom-specific options. -#ifdef __APPLE__ +#if SANITIZER_MAC bool PlatformHasDifferentMemcpyAndMemmove(); # define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \ (PlatformHasDifferentMemcpyAndMemmove()) #else # define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true -#endif // __APPLE__ +#endif // SANITIZER_MAC // Add convenient macro for interface functions that may be represented as // weak hooks. diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc index 845493de0956..17bb4ca5f01c 100644 --- a/lib/asan/asan_linux.cc +++ b/lib/asan/asan_linux.cc @@ -11,12 +11,13 @@ // // Linux-specific details. //===----------------------------------------------------------------------===// -#ifdef __linux__ + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_thread.h" -#include "asan_thread_registry.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_procmaps.h" @@ -31,7 +32,7 @@ #include <unistd.h> #include <unwind.h> -#if !ASAN_ANDROID +#if !SANITIZER_ANDROID // FIXME: where to get ucontext on Android? #include <sys/ucontext.h> #endif @@ -50,7 +51,7 @@ void *AsanDoesNotSupportStaticLinkage() { } void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { -#if ASAN_ANDROID +#if SANITIZER_ANDROID *pc = *sp = *bp = 0; #elif defined(__arm__) ucontext_t *ucontext = (ucontext_t*)context; @@ -101,25 +102,7 @@ void AsanPlatformThreadInit() { // Nothing here for now. 
} -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) { -#if defined(__arm__) || \ - defined(__powerpc__) || defined(__powerpc64__) || \ - defined(__sparc__) - fast = false; -#endif - if (!fast) - return stack->SlowUnwindStack(pc, max_s); - stack->size = 0; - stack->trace[0] = pc; - if (max_s > 1) { - stack->max_size = max_s; - if (!asan_inited) return; - if (AsanThread *t = asanThreadRegistry().GetCurrent()) - stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom()); - } -} - -#if !ASAN_ANDROID +#if !SANITIZER_ANDROID void ReadContextStack(void *context, uptr *stack, uptr *ssize) { ucontext_t *ucp = (ucontext_t*)context; *stack = (uptr)ucp->uc_stack.ss_sp; @@ -133,4 +116,4 @@ void ReadContextStack(void *context, uptr *stack, uptr *ssize) { } // namespace __asan -#endif // __linux__ +#endif // SANITIZER_LINUX diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc index 3ed9e06eeb6f..4313534008e7 100644 --- a/lib/asan/asan_mac.cc +++ b/lib/asan/asan_mac.cc @@ -12,7 +12,8 @@ // Mac-specific details. //===----------------------------------------------------------------------===// -#ifdef __APPLE__ +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC #include "asan_interceptors.h" #include "asan_internal.h" @@ -20,7 +21,6 @@ #include "asan_mapping.h" #include "asan_stack.h" #include "asan_thread.h" -#include "asan_thread_registry.h" #include "sanitizer_common/sanitizer_libc.h" #include <crt_externs.h> // for _NSGetArgv @@ -36,7 +36,6 @@ #include <stdlib.h> // for free() #include <unistd.h> #include <libkern/OSAtomic.h> -#include <CoreFoundation/CFString.h> namespace __asan { @@ -59,9 +58,9 @@ int GetMacosVersion() { uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]); for (uptr i = 0; i < maxlen; i++) version[i] = '\0'; // Get the version length. - CHECK(sysctl(mib, 2, 0, &len, 0, 0) != -1); - CHECK(len < maxlen); - CHECK(sysctl(mib, 2, version, &len, 0, 0) != -1); + CHECK_NE(sysctl(mib, 2, 0, &len, 0, 0), -1); + CHECK_LT(len, maxlen); + CHECK_NE(sysctl(mib, 2, version, &len, 0, 0), -1); switch (version[0]) { case '9': return MACOS_VERSION_LEOPARD; case '1': { @@ -89,16 +88,52 @@ extern "C" void __asan_init(); static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES"; +LowLevelAllocator allocator_for_env; + +// Change the value of the env var |name|, leaking the original value. +// If |name_value| is NULL, the variable is deleted from the environment, +// otherwise the corresponding "NAME=value" string is replaced with +// |name_value|. +void LeakyResetEnv(const char *name, const char *name_value) { + char ***env_ptr = _NSGetEnviron(); + CHECK(env_ptr); + char **environ = *env_ptr; + CHECK(environ); + uptr name_len = internal_strlen(name); + while (*environ != 0) { + uptr len = internal_strlen(*environ); + if (len > name_len) { + const char *p = *environ; + if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { + // Match. + if (name_value) { + // Replace the old value with the new one. + *environ = const_cast<char*>(name_value); + } else { + // Shift the subsequent pointers back. + char **del = environ; + do { + del[0] = del[1]; + } while (*del++); + } + } + } + environ++; + } +} void MaybeReexec() { if (!flags()->allow_reexec) return; -#if MAC_INTERPOSE_FUNCTIONS - // If the program is linked with the dynamic ASan runtime library, make sure - // the library is preloaded so that the wrappers work. If it is not, set - // DYLD_INSERT_LIBRARIES and re-exec ourselves. 
+ // Make sure the dynamic ASan runtime library is preloaded so that the + // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec + // ourselves. Dl_info info; CHECK(dladdr((void*)((uptr)__asan_init), &info)); - const char *dyld_insert_libraries = GetEnv(kDyldInsertLibraries); + char *dyld_insert_libraries = + const_cast<char*>(GetEnv(kDyldInsertLibraries)); + uptr old_env_len = dyld_insert_libraries ? + internal_strlen(dyld_insert_libraries) : 0; + uptr fname_len = internal_strlen(info.dli_fname); if (!dyld_insert_libraries || !REAL(strstr)(dyld_insert_libraries, info.dli_fname)) { // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime @@ -106,19 +141,80 @@ void MaybeReexec() { char program_name[1024]; uint32_t buf_size = sizeof(program_name); _NSGetExecutablePath(program_name, &buf_size); - // Ok to use setenv() since the wrappers don't depend on the value of - // asan_inited. - setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0); + char *new_env = const_cast<char*>(info.dli_fname); + if (dyld_insert_libraries) { + // Append the runtime dylib name to the existing value of + // DYLD_INSERT_LIBRARIES. + new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2); + internal_strncpy(new_env, dyld_insert_libraries, old_env_len); + new_env[old_env_len] = ':'; + // Copy fname_len and add a trailing zero. + internal_strncpy(new_env + old_env_len + 1, info.dli_fname, + fname_len + 1); + // Ok to use setenv() since the wrappers don't depend on the value of + // asan_inited. + setenv(kDyldInsertLibraries, new_env, /*overwrite*/1); + } else { + // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name. + setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0); + } if (flags()->verbosity >= 1) { Report("exec()-ing the program with\n"); - Report("%s=%s\n", kDyldInsertLibraries, info.dli_fname); + Report("%s=%s\n", kDyldInsertLibraries, new_env); Report("to enable ASan wrappers.\n"); Report("Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n"); } execv(program_name, *_NSGetArgv()); + } else { + // DYLD_INSERT_LIBRARIES is set and contains the runtime library. + if (old_env_len == fname_len) { + // It's just the runtime library name - fine to unset the variable. + LeakyResetEnv(kDyldInsertLibraries, NULL); + } else { + uptr env_name_len = internal_strlen(kDyldInsertLibraries); + // Allocate memory to hold the previous env var name, its value, the '=' + // sign and the '\0' char. + char *new_env = (char*)allocator_for_env.Allocate( + old_env_len + 2 + env_name_len); + CHECK(new_env); + internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); + internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); + new_env[env_name_len] = '='; + char *new_env_pos = new_env + env_name_len + 1; + + // Iterate over colon-separated pieces of |dyld_insert_libraries|. + char *piece_start = dyld_insert_libraries; + char *piece_end = NULL; + char *old_env_end = dyld_insert_libraries + old_env_len; + do { + if (piece_start[0] == ':') piece_start++; + piece_end = REAL(strchr)(piece_start, ':'); + if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; + if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; + uptr piece_len = piece_end - piece_start; + + // If the current piece isn't the runtime library name, + // append it to new_env. 
+ if ((piece_len != fname_len) || + (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) { + if (new_env_pos != new_env + env_name_len + 1) { + new_env_pos[0] = ':'; + new_env_pos++; + } + internal_strncpy(new_env_pos, piece_start, piece_len); + } + // Move on to the next piece. + new_env_pos += piece_len; + piece_start = piece_end; + } while (piece_start < old_env_end); + + // Can't use setenv() here, because it requires the allocator to be + // initialized. + // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in + // a separate function called after InitializeAllocator(). + LeakyResetEnv(kDyldInsertLibraries, new_env); + } } -#endif // MAC_INTERPOSE_FUNCTIONS - // If we're not using the dynamic runtime, do nothing. } // No-op. Mac does not support static linkage anyway. @@ -131,83 +227,12 @@ bool AsanInterceptsSignal(int signum) { } void AsanPlatformThreadInit() { - // For the first program thread, we can't replace the allocator before - // __CFInitialize() has been called. If it hasn't, we'll call - // MaybeReplaceCFAllocator() later on this thread. - // For other threads __CFInitialize() has been called before their creation. - // See also asan_malloc_mac.cc. - if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) { - MaybeReplaceCFAllocator(); - } -} - -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) { - (void)fast; - stack->size = 0; - stack->trace[0] = pc; - if ((max_s) > 1) { - stack->max_size = max_s; - if (!asan_inited) return; - if (AsanThread *t = asanThreadRegistry().GetCurrent()) - stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom()); - } } void ReadContextStack(void *context, uptr *stack, uptr *ssize) { UNIMPLEMENTED(); } -// The range of pages to be used for escape islands. -// TODO(glider): instead of mapping a fixed range we must find a range of -// unmapped pages in vmmap and take them. -// These constants were chosen empirically and may not work if the shadow -// memory layout changes. Unfortunately they do necessarily depend on -// kHighMemBeg or kHighMemEnd. -static void *island_allocator_pos = 0; - -#if SANITIZER_WORDSIZE == 32 -# define kIslandEnd (0xffdf0000 - GetPageSizeCached()) -# define kIslandBeg (kIslandEnd - 256 * GetPageSizeCached()) -#else -# define kIslandEnd (0x7fffffdf0000 - GetPageSizeCached()) -# define kIslandBeg (kIslandEnd - 256 * GetPageSizeCached()) -#endif - -extern "C" -mach_error_t __interception_allocate_island(void **ptr, - uptr unused_size, - void *unused_hint) { - if (!island_allocator_pos) { - island_allocator_pos = - internal_mmap((void*)kIslandBeg, kIslandEnd - kIslandBeg, - PROT_READ | PROT_WRITE | PROT_EXEC, - MAP_PRIVATE | MAP_ANON | MAP_FIXED, - -1, 0); - if (island_allocator_pos != (void*)kIslandBeg) { - return KERN_NO_SPACE; - } - if (flags()->verbosity) { - Report("Mapped pages %p--%p for branch islands.\n", - (void*)kIslandBeg, (void*)kIslandEnd); - } - // Should not be very performance-critical. - internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg); - }; - *ptr = island_allocator_pos; - island_allocator_pos = (char*)island_allocator_pos + GetPageSizeCached(); - if (flags()->verbosity) { - Report("Branch island allocated at %p\n", *ptr); - } - return err_none; -} - -extern "C" -mach_error_t __interception_deallocate_island(void *ptr) { - // Do nothing. - // TODO(glider): allow to free and reuse the island memory. 
- return err_none; -} - // Support for the following functions from libdispatch on Mac OS: // dispatch_async_f() // dispatch_async() @@ -237,9 +262,6 @@ mach_error_t __interception_deallocate_island(void *ptr) { // The implementation details are at // http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c -typedef void* pthread_workqueue_t; -typedef void* pthread_workitem_handle_t; - typedef void* dispatch_group_t; typedef void* dispatch_queue_t; typedef void* dispatch_source_t; @@ -254,32 +276,16 @@ typedef struct { u32 parent_tid; } asan_block_context_t; -// We use extern declarations of libdispatch functions here instead -// of including <dispatch/dispatch.h>. This header is not present on -// Mac OS X Leopard and eariler, and although we don't expect ASan to -// work on legacy systems, it's bad to break the build of -// LLVM compiler-rt there. -extern "C" { -void dispatch_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); -void dispatch_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); -void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); -void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); -void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq, - void *ctxt, dispatch_function_t func); -} // extern "C" - -static ALWAYS_INLINE +ALWAYS_INLINE void asan_register_worker_thread(int parent_tid, StackTrace *stack) { - AsanThread *t = asanThreadRegistry().GetCurrent(); + AsanThread *t = GetCurrentThread(); if (!t) { - t = AsanThread::Create(parent_tid, 0, 0, stack); - asanThreadRegistry().RegisterThread(t); + t = AsanThread::Create(0, 0); + CreateThreadContextArgs args = { t, stack }; + asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args); t->Init(); - asanThreadRegistry().SetCurrent(t); + asanThreadRegistry().StartThread(t->tid(), 0, 0); + SetCurrentThread(t); } } @@ -313,7 +319,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack); asan_ctxt->block = ctxt; asan_ctxt->func = func; - asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); + asan_ctxt->parent_tid = GetCurrentTidOrInvalid(); return asan_ctxt; } @@ -364,14 +370,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, asan_dispatch_call_block_and_release); } -#if MAC_INTERPOSE_FUNCTIONS && !defined(MISSING_BLOCKS_SUPPORT) -// dispatch_async, dispatch_group_async and others tailcall the corresponding -// dispatch_*_f functions. When wrapping functions with mach_override, those -// dispatch_*_f are intercepted automatically. But with dylib interposition -// this does not work, because the calls within the same library are not -// interposed. -// Therefore we need to re-implement dispatch_async and friends. - +#if !defined(MISSING_BLOCKS_SUPPORT) extern "C" { // FIXME: consolidate these declarations with asan_intercepted_functions.h. 
void dispatch_async(dispatch_queue_t dq, void(^work)(void)); @@ -386,7 +385,7 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); #define GET_ASAN_BLOCK(work) \ void (^asan_block)(void); \ - int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \ + int parent_tid = GetCurrentTidOrInvalid(); \ asan_block = ^(void) { \ GET_STACK_TRACE_THREAD; \ asan_register_worker_thread(parent_tid, &stack); \ @@ -424,53 +423,4 @@ INTERCEPTOR(void, dispatch_source_set_event_handler, } #endif -// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c -int __CFStrIsConstant(CFStringRef str) { - CFRuntimeBase *base = (CFRuntimeBase*)str; -#if __LP64__ - return base->_rc == 0; -#else - return (base->_cfinfo[CF_RC_BITS]) == 0; -#endif -} - -INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc, - CFStringRef str) { - if (__CFStrIsConstant(str)) { - return str; - } else { - return REAL(CFStringCreateCopy)(alloc, str); - } -} - -DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr) - -DECLARE_REAL_AND_INTERCEPTOR(void, __CFInitialize, void) - -namespace __asan { - -void InitializeMacInterceptors() { - CHECK(INTERCEPT_FUNCTION(dispatch_async_f)); - CHECK(INTERCEPT_FUNCTION(dispatch_sync_f)); - CHECK(INTERCEPT_FUNCTION(dispatch_after_f)); - CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f)); - CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f)); - // Normally CFStringCreateCopy should not copy constant CF strings. - // Replacing the default CFAllocator causes constant strings to be copied - // rather than just returned, which leads to bugs in big applications like - // Chromium and WebKit, see - // http://code.google.com/p/address-sanitizer/issues/detail?id=10 - // Until this problem is fixed we need to check that the string is - // non-constant before calling CFStringCreateCopy. - CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy)); - // Some of the library functions call free() directly, so we have to - // intercept it. - CHECK(INTERCEPT_FUNCTION(free)); - if (flags()->replace_cfallocator) { - CHECK(INTERCEPT_FUNCTION(__CFInitialize)); - } -} - -} // namespace __asan - -#endif // __APPLE__ +#endif // SANITIZER_MAC diff --git a/lib/asan/asan_mac.h b/lib/asan/asan_mac.h index be913865c440..b1a1966dbc6e 100644 --- a/lib/asan/asan_mac.h +++ b/lib/asan/asan_mac.h @@ -12,7 +12,7 @@ // Mac-specific ASan definitions. //===----------------------------------------------------------------------===// #ifndef ASAN_MAC_H -#define ASAN_MAC_H +#define ASAN__MAC_H // CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal // and subject to change in further CoreFoundation versions. Apple does not diff --git a/lib/asan/asan_malloc_linux.cc b/lib/asan/asan_malloc_linux.cc index b95cfe3149b7..20e636b9b3c0 100644 --- a/lib/asan/asan_malloc_linux.cc +++ b/lib/asan/asan_malloc_linux.cc @@ -13,16 +13,16 @@ // We simply define functions like malloc, free, realloc, etc. // They will replace the corresponding libc functions automagically. 
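// (On Linux this relies on ordinary symbol interposition: the ASan runtime is
// linked into the executable, or preloaded, ahead of libc, so the dynamic
// linker resolves malloc, free, realloc and friends to the definitions below
// rather than to the libc ones. That is why no explicit interception
// machinery is needed here, unlike on Mac OS, where the allocator is replaced
// through the malloc zone API instead.)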
//===----------------------------------------------------------------------===// -#ifdef __linux__ + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX #include "asan_allocator.h" #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_stack.h" -#include "asan_thread_registry.h" -#include "sanitizer/asan_interface.h" -#if ASAN_ANDROID +#if SANITIZER_ANDROID DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size) DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr) DECLARE_REAL_AND_INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) @@ -147,4 +147,4 @@ INTERCEPTOR(void, malloc_stats, void) { __asan_print_accumulated_stats(); } -#endif // __linux__ +#endif // SANITIZER_LINUX diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc index 545ede2debe7..4f353cb99ca7 100644 --- a/lib/asan/asan_malloc_mac.cc +++ b/lib/asan/asan_malloc_mac.cc @@ -12,7 +12,8 @@ // Mac-specific malloc interception. //===----------------------------------------------------------------------===// -#ifdef __APPLE__ +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC #include <AvailabilityMacros.h> #include <CoreFoundation/CFBase.h> @@ -26,7 +27,6 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" -#include "asan_thread_registry.h" // Similar code is used in Google Perftools, // http://code.google.com/p/google-perftools. @@ -36,85 +36,108 @@ using namespace __asan; // NOLINT // TODO(glider): do we need both zones? static malloc_zone_t *system_malloc_zone = 0; -static malloc_zone_t *system_purgeable_zone = 0; static malloc_zone_t asan_zone; -CFAllocatorRef cf_asan = 0; - -// _CFRuntimeCreateInstance() checks whether the supplied allocator is -// kCFAllocatorSystemDefault and, if it is not, stores the allocator reference -// at the beginning of the allocated memory and returns the pointer to the -// allocated memory plus sizeof(CFAllocatorRef). See -// http://www.opensource.apple.com/source/CF/CF-635.21/CFRuntime.c -// Pointers returned by _CFRuntimeCreateInstance() can then be passed directly -// to free() or CFAllocatorDeallocate(), which leads to false invalid free -// reports. -// The corresponding rdar bug is http://openradar.appspot.com/radar?id=1796404. -void* ALWAYS_INLINE get_saved_cfallocator_ref(void *ptr) { - if (flags()->replace_cfallocator) { - // Make sure we're not hitting the previous page. This may be incorrect - // if ASan's malloc returns an address ending with 0xFF8, which will be - // then padded to a page boundary with a CFAllocatorRef. - uptr arith_ptr = (uptr)ptr; - if ((arith_ptr & 0xFFF) > sizeof(CFAllocatorRef)) { - CFAllocatorRef *saved = - (CFAllocatorRef*)(arith_ptr - sizeof(CFAllocatorRef)); - if ((*saved == cf_asan) && asan_mz_size(saved)) ptr = (void*)saved; - } + +INTERCEPTOR(malloc_zone_t *, malloc_create_zone, + vm_size_t start_size, unsigned zone_flags) { + if (!asan_inited) __asan_init(); + GET_STACK_TRACE_MALLOC; + malloc_zone_t *new_zone = + (malloc_zone_t*)asan_malloc(sizeof(asan_zone), &stack); + internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone)); + new_zone->zone_name = NULL; // The name will be changed anyway. + return new_zone; +} + +INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) { + if (!asan_inited) __asan_init(); + return &asan_zone; +} + +INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) { + // FIXME: ASan should support purgeable allocations. 
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=139 + if (!asan_inited) __asan_init(); + return &asan_zone; +} + +INTERCEPTOR(void, malloc_make_purgeable, void *ptr) { + // FIXME: ASan should support purgeable allocations. Ignoring them is fine + // for now. + if (!asan_inited) __asan_init(); +} + +INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) { + // FIXME: ASan should support purgeable allocations. Ignoring them is fine + // for now. + if (!asan_inited) __asan_init(); + // Must return 0 if the contents were not purged since the last call to + // malloc_make_purgeable(). + return 0; +} + +INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) { + if (!asan_inited) __asan_init(); + // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes. + size_t buflen = 6 + (name ? internal_strlen(name) : 0); + InternalScopedBuffer<char> new_name(buflen); + if (name && zone->introspect == asan_zone.introspect) { + internal_snprintf(new_name.data(), buflen, "asan-%s", name); + name = new_name.data(); } - return ptr; + + // Call the system malloc's implementation for both external and our zones, + // since that appropriately changes VM region protections on the zone. + REAL(malloc_set_zone_name)(zone, name); +} + +INTERCEPTOR(void *, malloc, size_t size) { + if (!asan_inited) __asan_init(); + GET_STACK_TRACE_MALLOC; + void *res = asan_malloc(size, &stack); + return res; } -// The free() implementation provided by OS X calls malloc_zone_from_ptr() -// to find the owner of |ptr|. If the result is 0, an invalid free() is -// reported. Our implementation falls back to asan_free() in this case -// in order to print an ASan-style report. -// -// For the objects created by _CFRuntimeCreateInstance a CFAllocatorRef is -// placed at the beginning of the allocated chunk and the pointer returned by -// our allocator is off by sizeof(CFAllocatorRef). This pointer can be then -// passed directly to free(), which will lead to errors. -// To overcome this we're checking whether |ptr-sizeof(CFAllocatorRef)| -// contains a pointer to our CFAllocator (assuming no other allocator is used). -// See http://code.google.com/p/address-sanitizer/issues/detail?id=70 for more -// info. INTERCEPTOR(void, free, void *ptr) { - malloc_zone_t *zone = malloc_zone_from_ptr(ptr); - if (zone) { -#if defined(MAC_OS_X_VERSION_10_6) && \ - MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 - if ((zone->version >= 6) && (zone->free_definite_size)) { - zone->free_definite_size(zone, ptr, malloc_size(ptr)); - } else { - malloc_zone_free(zone, ptr); - } -#else - malloc_zone_free(zone, ptr); -#endif - } else { - if (!asan_mz_size(ptr)) ptr = get_saved_cfallocator_ref(ptr); - GET_STACK_TRACE_FREE; - asan_free(ptr, &stack, FROM_MALLOC); - } + if (!asan_inited) __asan_init(); + if (!ptr) return; + GET_STACK_TRACE_FREE; + asan_free(ptr, &stack, FROM_MALLOC); } -// We can't always replace the default CFAllocator with cf_asan right in -// ReplaceSystemMalloc(), because it is sometimes called before -// __CFInitialize(), when the default allocator is invalid and replacing it may -// crash the program. Instead we wait for the allocator to initialize and jump -// in just after __CFInitialize(). Nobody is going to allocate memory using -// CFAllocators before that, so we won't miss anything. 
-// -// See http://code.google.com/p/address-sanitizer/issues/detail?id=87 -// and http://opensource.apple.com/source/CF/CF-550.43/CFRuntime.c -INTERCEPTOR(void, __CFInitialize, void) { - // If the runtime is built as dynamic library, __CFInitialize wrapper may be - // called before __asan_init. -#if !MAC_INTERPOSE_FUNCTIONS - CHECK(flags()->replace_cfallocator); - CHECK(asan_inited); -#endif - REAL(__CFInitialize)(); - if (!cf_asan && asan_inited) MaybeReplaceCFAllocator(); +INTERCEPTOR(void *, realloc, void *ptr, size_t size) { + if (!asan_inited) __asan_init(); + GET_STACK_TRACE_MALLOC; + return asan_realloc(ptr, size, &stack); +} + +INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) { + if (!asan_inited) __asan_init(); + GET_STACK_TRACE_MALLOC; + return asan_calloc(nmemb, size, &stack); +} + +INTERCEPTOR(void *, valloc, size_t size) { + if (!asan_inited) __asan_init(); + GET_STACK_TRACE_MALLOC; + return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); +} + +INTERCEPTOR(size_t, malloc_good_size, size_t size) { + if (!asan_inited) __asan_init(); + return asan_zone.introspect->good_size(&asan_zone, size); +} + +INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) { + if (!asan_inited) __asan_init(); + CHECK(memptr); + GET_STACK_TRACE_MALLOC; + void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC); + if (result) { + *memptr = result; + return 0; + } + return -1; } namespace { @@ -134,15 +157,6 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) { return asan_malloc(size, &stack); } -void *cf_malloc(CFIndex size, CFOptionFlags hint, void *info) { - if (!asan_inited) { - CHECK(system_malloc_zone); - return malloc_zone_malloc(system_malloc_zone, size); - } - GET_STACK_TRACE_MALLOC; - return asan_malloc(size, &stack); -} - void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { if (!asan_inited) { // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. @@ -174,31 +188,14 @@ void *mz_valloc(malloc_zone_t *zone, size_t size) { void ALWAYS_INLINE free_common(void *context, void *ptr) { if (!ptr) return; - if (asan_mz_size(ptr)) { - GET_STACK_TRACE_FREE; + GET_STACK_TRACE_FREE; + // FIXME: need to retire this flag. + if (!flags()->mac_ignore_invalid_free) { asan_free(ptr, &stack, FROM_MALLOC); } else { - // If the pointer does not belong to any of the zones, use one of the - // fallback methods to free memory. - malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); - if (zone_ptr == system_purgeable_zone) { - // allocations from malloc_default_purgeable_zone() done before - // __asan_init() may be occasionally freed via free_common(). - // see http://code.google.com/p/address-sanitizer/issues/detail?id=99. - malloc_zone_free(zone_ptr, ptr); - } else { - // If the memory chunk pointer was moved to store additional - // CFAllocatorRef, fix it back. 
- ptr = get_saved_cfallocator_ref(ptr); - GET_STACK_TRACE_FREE; - if (!flags()->mac_ignore_invalid_free) { - asan_free(ptr, &stack, FROM_MALLOC); - } else { - GET_ZONE_FOR_PTR(ptr); - WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); - return; - } - } + GET_ZONE_FOR_PTR(ptr); + WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); + return; } } @@ -207,10 +204,6 @@ void mz_free(malloc_zone_t *zone, void *ptr) { free_common(zone, ptr); } -void cf_free(void *ptr, void *info) { - free_common(info, ptr); -} - void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (!ptr) { GET_STACK_TRACE_MALLOC; @@ -230,29 +223,11 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { } } -void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) { - if (!ptr) { - GET_STACK_TRACE_MALLOC; - return asan_malloc(size, &stack); - } else { - if (asan_mz_size(ptr)) { - GET_STACK_TRACE_MALLOC; - return asan_realloc(ptr, size, &stack); - } else { - // We can't recover from reallocating an unknown address, because - // this would require reading at most |size| bytes from - // potentially unaccessible memory. - GET_STACK_TRACE_FREE; - GET_ZONE_FOR_PTR(ptr); - ReportMacCfReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack); - } - } -} - void mz_destroy(malloc_zone_t* zone) { // A no-op -- we will not be destroyed! - Printf("mz_destroy() called -- ignoring\n"); + Report("mz_destroy() called -- ignoring\n"); } + // from AvailabilityMacros.h #if defined(MAC_OS_X_VERSION_10_6) && \ MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 @@ -309,7 +284,7 @@ void mi_force_unlock(malloc_zone_t *zone) { void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { AsanMallocStats malloc_stats; - asanThreadRegistry().FillMallocStatistics(&malloc_stats); + FillMallocStatistics(&malloc_stats); CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); } @@ -324,23 +299,7 @@ boolean_t mi_zone_locked(malloc_zone_t *zone) { } // unnamed namespace -extern int __CFRuntimeClassTableSize; - namespace __asan { -void MaybeReplaceCFAllocator() { - static CFAllocatorContext asan_context = { - /*version*/ 0, /*info*/ &asan_zone, - /*retain*/ 0, /*release*/ 0, - /*copyDescription*/0, - /*allocate*/ &cf_malloc, - /*reallocate*/ &cf_realloc, - /*deallocate*/ &cf_free, - /*preferredSize*/ 0 }; - if (!cf_asan) - cf_asan = CFAllocatorCreate(kCFAllocatorUseContext, &asan_context); - if (flags()->replace_cfallocator && CFAllocatorGetDefault() != cf_asan) - CFAllocatorSetDefault(cf_asan); -} void ReplaceSystemMalloc() { static malloc_introspection_t asan_introspection; @@ -380,42 +339,11 @@ void ReplaceSystemMalloc() { asan_zone.free_definite_size = 0; asan_zone.memalign = &mz_memalign; asan_introspection.zone_locked = &mi_zone_locked; - - // Request the default purgable zone to force its creation. The - // current default zone is registered with the purgable zone for - // doing tiny and small allocs. Sadly, it assumes that the default - // zone is the szone implementation from OS X and will crash if it - // isn't. By creating the zone now, this will be true and changing - // the default zone won't cause a problem. (OS X 10.6 and higher.) - system_purgeable_zone = malloc_default_purgeable_zone(); #endif - // Register the ASan zone. At this point, it will not be the - // default zone. + // Register the ASan zone. 
malloc_zone_register(&asan_zone); - - // Unregister and reregister the default zone. Unregistering swaps - // the specified zone with the last one registered which for the - // default zone makes the more recently registered zone the default - // zone. The default zone is then re-registered to ensure that - // allocations made from it earlier will be handled correctly. - // Things are not guaranteed to work that way, but it's how they work now. - system_malloc_zone = malloc_default_zone(); - malloc_zone_unregister(system_malloc_zone); - malloc_zone_register(system_malloc_zone); - // Make sure the default allocator was replaced. - CHECK(malloc_default_zone() == &asan_zone); - - // If __CFInitialize() hasn't been called yet, cf_asan will be created and - // installed as the default allocator after __CFInitialize() finishes (see - // the interceptor for __CFInitialize() above). Otherwise install cf_asan - // right now. On both Snow Leopard and Lion __CFInitialize() calls - // __CFAllocatorInitialize(), which initializes the _base._cfisa field of - // the default allocators we check here. - if (((CFRuntimeBase*)kCFAllocatorSystemDefault)->_cfisa) { - MaybeReplaceCFAllocator(); - } } } // namespace __asan -#endif // __APPLE__ +#endif // SANITIZER_MAC diff --git a/lib/asan/asan_malloc_win.cc b/lib/asan/asan_malloc_win.cc index 9fcfea56384f..31fb777c7045 100644 --- a/lib/asan/asan_malloc_win.cc +++ b/lib/asan/asan_malloc_win.cc @@ -11,7 +11,9 @@ // // Windows-specific malloc interception. //===----------------------------------------------------------------------===// -#ifdef _WIN32 + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS #include "asan_allocator.h" #include "asan_interceptors.h" diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h index 5e3067031f4a..f04629222419 100644 --- a/lib/asan/asan_mapping.h +++ b/lib/asan/asan_mapping.h @@ -18,6 +18,37 @@ // The full explanation of the memory mapping could be found here: // http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm +// +// Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000: +// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || +// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || +// || `[0x00008fff7000, 0x02008fff6fff]` || ShadowGap || +// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || +// || `[0x000000000000, 0x00007fff7fff]` || LowMem || +// +// When SHADOW_OFFSET is zero (-pie): +// || `[0x100000000000, 0x7fffffffffff]` || HighMem || +// || `[0x020000000000, 0x0fffffffffff]` || HighShadow || +// || `[0x000000040000, 0x01ffffffffff]` || ShadowGap || +// +// Special case when something is already mapped between +// 0x003000000000 and 0x005000000000 (e.g. 
when prelink is installed): +// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem || +// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow || +// || `[0x005000000000, 0x02008fff6fff]` || ShadowGap3 || +// || `[0x003000000000, 0x004fffffffff]` || MidMem || +// || `[0x000a7fff8000, 0x002fffffffff]` || ShadowGap2 || +// || `[0x00067fff8000, 0x000a7fff7fff]` || MidShadow || +// || `[0x00008fff7000, 0x00067fff7fff]` || ShadowGap || +// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow || +// || `[0x000000000000, 0x00007fff7fff]` || LowMem || +// +// Default Linux/i386 mapping: +// || `[0x40000000, 0xffffffff]` || HighMem || +// || `[0x28000000, 0x3fffffff]` || HighShadow || +// || `[0x24000000, 0x27ffffff]` || ShadowGap || +// || `[0x20000000, 0x23ffffff]` || LowShadow || +// || `[0x00000000, 0x1fffffff]` || LowMem || #if ASAN_FLEXIBLE_MAPPING_AND_OFFSET == 1 extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_scale; @@ -25,7 +56,7 @@ extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset; # define SHADOW_SCALE (__asan_mapping_scale) # define SHADOW_OFFSET (__asan_mapping_offset) #else -# if ASAN_ANDROID +# if SANITIZER_ANDROID # define SHADOW_SCALE (3) # define SHADOW_OFFSET (0) # else @@ -36,27 +67,20 @@ extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset; # if defined(__powerpc64__) # define SHADOW_OFFSET (1ULL << 41) # else -# define SHADOW_OFFSET (1ULL << 44) +# if SANITIZER_MAC +# define SHADOW_OFFSET (1ULL << 44) +# else +# define SHADOW_OFFSET 0x7fff8000ULL +# endif # endif # endif # endif #endif // ASAN_FLEXIBLE_MAPPING_AND_OFFSET #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) -#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) | (SHADOW_OFFSET)) +#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) #define SHADOW_TO_MEM(shadow) (((shadow) - SHADOW_OFFSET) << SHADOW_SCALE) -#if SANITIZER_WORDSIZE == 64 -# if defined(__powerpc64__) - static const uptr kHighMemEnd = 0x00000fffffffffffUL; -# else - static const uptr kHighMemEnd = 0x00007fffffffffffUL; -# endif -#else // SANITIZER_WORDSIZE == 32 - static const uptr kHighMemEnd = 0xffffffff; -#endif // SANITIZER_WORDSIZE - - #define kLowMemBeg 0 #define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0) @@ -68,59 +92,121 @@ extern SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_mapping_offset; #define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg) #define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd) +# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg) +# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd) + // With the zero shadow base we can not actually map pages starting from 0. // This constant is somewhat arbitrary. #define kZeroBaseShadowStart (1 << 18) #define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \ : kZeroBaseShadowStart) -#define kShadowGapEnd (kHighShadowBeg - 1) +#define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1) -#define kGlobalAndStackRedzone \ - (SHADOW_GRANULARITY < 32 ? 32 : SHADOW_GRANULARITY) +#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0) +#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0) + +#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) +#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) + +#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. + +#if DO_ASAN_MAPPING_PROFILE +# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; +#else +# define PROFILE_ASAN_MAPPING() +#endif + +// If 1, all shadow boundaries are constants. +// Don't set to 1 other than for testing. 
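// For intuition, a worked example of the mapping above (values assume the
// default Linux/x86_64 parameters: SHADOW_SCALE == 3, i.e. one shadow byte
// per 8 application bytes, and SHADOW_OFFSET == 0x7fff8000):
//   MEM_TO_SHADOW(0x000000400000) == (0x000000400000 >> 3) + 0x7fff8000
//                                 == 0x000080078000   (inside LowShadow)
//   MEM_TO_SHADOW(0x7fffffffe000) == (0x7fffffffe000 >> 3) + 0x7fff8000
//                                 == 0x10007fff7c00   (inside HighShadow)
// Every application address maps into one of the shadow regions, and the
// gaps between them are never valid shadow locations.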
+#define ASAN_FIXED_MAPPING 0 namespace __asan { +extern uptr AsanMappingProfile[]; + +#if ASAN_FIXED_MAPPING +// Fixed mapping for 64-bit Linux. Mostly used for performance comparison +// with non-fixed mapping. As of r175253 (Feb 2013) the performance +// difference between fixed and non-fixed mapping is below the noise level. +static uptr kHighMemEnd = 0x7fffffffffffULL; +static uptr kMidMemBeg = 0x3000000000ULL; +static uptr kMidMemEnd = 0x4fffffffffULL; +#else +SANITIZER_INTERFACE_ATTRIBUTE +extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. +#endif + static inline bool AddrIsInLowMem(uptr a) { + PROFILE_ASAN_MAPPING(); return a < kLowMemEnd; } static inline bool AddrIsInLowShadow(uptr a) { + PROFILE_ASAN_MAPPING(); return a >= kLowShadowBeg && a <= kLowShadowEnd; } static inline bool AddrIsInHighMem(uptr a) { + PROFILE_ASAN_MAPPING(); return a >= kHighMemBeg && a <= kHighMemEnd; } +static inline bool AddrIsInMidMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; +} + static inline bool AddrIsInMem(uptr a) { - return AddrIsInLowMem(a) || AddrIsInHighMem(a); + PROFILE_ASAN_MAPPING(); + return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a); } static inline uptr MemToShadow(uptr p) { + PROFILE_ASAN_MAPPING(); CHECK(AddrIsInMem(p)); return MEM_TO_SHADOW(p); } static inline bool AddrIsInHighShadow(uptr a) { - return a >= kHighShadowBeg && a <= kHighMemEnd; + PROFILE_ASAN_MAPPING(); + return a >= kHighShadowBeg && a <= kHighMemEnd; +} + +static inline bool AddrIsInMidShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidShadowBeg && a <= kMidMemEnd; } static inline bool AddrIsInShadow(uptr a) { - return AddrIsInLowShadow(a) || AddrIsInHighShadow(a); + PROFILE_ASAN_MAPPING(); + return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a); } static inline bool AddrIsInShadowGap(uptr a) { + PROFILE_ASAN_MAPPING(); + if (kMidMemBeg) { + if (a <= kShadowGapEnd) + return SHADOW_OFFSET == 0 || a >= kShadowGapBeg; + return (a >= kShadowGap2Beg && a <= kShadowGap2End) || + (a >= kShadowGap3Beg && a <= kShadowGap3End); + } + // In zero-based shadow mode we treat addresses near zero as addresses + // in shadow gap as well. + if (SHADOW_OFFSET == 0) + return a <= kShadowGapEnd; return a >= kShadowGapBeg && a <= kShadowGapEnd; } static inline bool AddrIsAlignedByGranularity(uptr a) { + PROFILE_ASAN_MAPPING(); return (a & (SHADOW_GRANULARITY - 1)) == 0; } static inline bool AddressIsPoisoned(uptr a) { + PROFILE_ASAN_MAPPING(); const uptr kAccessSize = 1; - u8 *shadow_address = (u8*)MemToShadow(a); + u8 *shadow_address = (u8*)MEM_TO_SHADOW(a); s8 shadow_value = *shadow_address; if (shadow_value) { u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1)) @@ -130,6 +216,9 @@ static inline bool AddressIsPoisoned(uptr a) { return false; } +// Must be after all calls to PROFILE_ASAN_MAPPING(). +static const uptr kAsanMappingProfileSize = __LINE__; + } // namespace __asan #endif // ASAN_MAPPING_H diff --git a/lib/asan/asan_new_delete.cc b/lib/asan/asan_new_delete.cc index 5d1f23c542e6..d5eb6eca9321 100644 --- a/lib/asan/asan_new_delete.cc +++ b/lib/asan/asan_new_delete.cc @@ -28,7 +28,8 @@ void ReplaceOperatorsNewAndDelete() { } using namespace __asan; // NOLINT // On Android new() goes through malloc interceptors. -#if !ASAN_ANDROID +// See also https://code.google.com/p/address-sanitizer/issues/detail?id=131. +#if !SANITIZER_ANDROID // Fake std::nothrow_t to avoid including <new>. 
namespace std { @@ -39,6 +40,14 @@ struct nothrow_t {}; GET_STACK_TRACE_MALLOC;\ return asan_memalign(0, size, &stack, type); +// On OS X it's not enough to just provide our own 'operator new' and +// 'operator delete' implementations, because they're going to be in the +// runtime dylib, and the main executable will depend on both the runtime +// dylib and libstdc++, each of those'll have its implementation of new and +// delete. +// To make sure that C++ allocation/deallocation operators are overridden on +// OS X we need to intercept them using their mangled names. +#if !SANITIZER_MAC INTERCEPTOR_ATTRIBUTE void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); } INTERCEPTOR_ATTRIBUTE @@ -50,10 +59,26 @@ INTERCEPTOR_ATTRIBUTE void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY(FROM_NEW_BR); } +#else // SANITIZER_MAC +INTERCEPTOR(void *, _Znwm, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW); +} +INTERCEPTOR(void *, _Znam, size_t size) { + OPERATOR_NEW_BODY(FROM_NEW_BR); +} +INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW); +} +INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(FROM_NEW_BR); +} +#endif + #define OPERATOR_DELETE_BODY(type) \ GET_STACK_TRACE_FREE;\ asan_free(ptr, &stack, type); +#if !SANITIZER_MAC INTERCEPTOR_ATTRIBUTE void operator delete(void *ptr) { OPERATOR_DELETE_BODY(FROM_NEW); } INTERCEPTOR_ATTRIBUTE @@ -65,4 +90,19 @@ INTERCEPTOR_ATTRIBUTE void operator delete[](void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW_BR); } +#else // SANITIZER_MAC +INTERCEPTOR(void, _ZdlPv, void *ptr) { + OPERATOR_DELETE_BODY(FROM_NEW); +} +INTERCEPTOR(void, _ZdaPv, void *ptr) { + OPERATOR_DELETE_BODY(FROM_NEW_BR); +} +INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(FROM_NEW); +} +INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(FROM_NEW_BR); +} +#endif + #endif diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc index dc5749243569..772b5e64b027 100644 --- a/lib/asan/asan_poisoning.cc +++ b/lib/asan/asan_poisoning.cc @@ -12,10 +12,7 @@ // Shadow memory poisoning by ASan RTL and by user application. 
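// "Poisoning" a region means writing magic values into the shadow bytes that
// describe it: 0 marks an 8-byte granule that is fully addressable, a value k
// in [1, 7] marks a granule whose first k bytes are addressable, and values
// with the high bit set mark redzones, freed memory and other special regions
// that instrumented memory accesses must report as errors.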
//===----------------------------------------------------------------------===// -#include "asan_interceptors.h" -#include "asan_internal.h" -#include "asan_mapping.h" -#include "sanitizer/asan_interface.h" +#include "asan_poisoning.h" #include "sanitizer_common/sanitizer_libc.h" namespace __asan { @@ -23,11 +20,11 @@ namespace __asan { void PoisonShadow(uptr addr, uptr size, u8 value) { if (!flags()->poison_heap) return; CHECK(AddrIsAlignedByGranularity(addr)); + CHECK(AddrIsInMem(addr)); CHECK(AddrIsAlignedByGranularity(addr + size)); - uptr shadow_beg = MemToShadow(addr); - uptr shadow_end = MemToShadow(addr + size - SHADOW_GRANULARITY) + 1; - CHECK(REAL(memset) != 0); - REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg); + CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY)); + CHECK(REAL(memset)); + FastPoisonShadow(addr, size, value); } void PoisonShadowPartialRightRedzone(uptr addr, @@ -36,20 +33,10 @@ void PoisonShadowPartialRightRedzone(uptr addr, u8 value) { if (!flags()->poison_heap) return; CHECK(AddrIsAlignedByGranularity(addr)); - u8 *shadow = (u8*)MemToShadow(addr); - for (uptr i = 0; i < redzone_size; - i += SHADOW_GRANULARITY, shadow++) { - if (i + SHADOW_GRANULARITY <= size) { - *shadow = 0; // fully addressable - } else if (i >= size) { - *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable - } else { - *shadow = size - i; // first size-i bytes are addressable - } - } + CHECK(AddrIsInMem(addr)); + FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); } - struct ShadowSegmentEndpoint { u8 *chunk; s8 offset; // in [0, SHADOW_GRANULARITY) @@ -182,6 +169,55 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) { return 0; } +#define CHECK_SMALL_REGION(p, size, isWrite) \ + do { \ + uptr __p = reinterpret_cast<uptr>(p); \ + uptr __size = size; \ + if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \ + __asan::AddressIsPoisoned(__p + __size - 1))) { \ + GET_CURRENT_PC_BP_SP; \ + uptr __bad = __asan_region_is_poisoned(__p, __size); \ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size);\ + } \ + } while (false); \ + + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u16 __sanitizer_unaligned_load16(const u16 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u32 __sanitizer_unaligned_load32(const u32 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +u64 __sanitizer_unaligned_load64(const u64 *p) { + CHECK_SMALL_REGION(p, sizeof(*p), false); + return *p; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store16(u16 *p, u16 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store32(u32 *p, u32 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_unaligned_store64(u64 *p, u64 x) { + CHECK_SMALL_REGION(p, sizeof(*p), true); + *p = x; +} + // This is a simplified version of __asan_(un)poison_memory_region, which // assumes that left border of region to be poisoned is properly aligned. 
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) { diff --git a/lib/asan/asan_poisoning.h b/lib/asan/asan_poisoning.h new file mode 100644 index 000000000000..86f81e5d0ae5 --- /dev/null +++ b/lib/asan/asan_poisoning.h @@ -0,0 +1,58 @@ +//===-- asan_poisoning.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Shadow memory poisoning by ASan RTL and by user application. +//===----------------------------------------------------------------------===// + +#include "asan_interceptors.h" +#include "asan_internal.h" +#include "asan_mapping.h" + +namespace __asan { + +// Poisons the shadow memory for "size" bytes starting from "addr". +void PoisonShadow(uptr addr, uptr size, u8 value); + +// Poisons the shadow memory for "redzone_size" bytes starting from +// "addr + size". +void PoisonShadowPartialRightRedzone(uptr addr, + uptr size, + uptr redzone_size, + u8 value); + +// Fast versions of PoisonShadow and PoisonShadowPartialRightRedzone that +// assume that memory addresses are properly aligned. Use in +// performance-critical code with care. +ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, + u8 value) { + DCHECK(flags()->poison_heap); + uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); + uptr shadow_end = MEM_TO_SHADOW( + aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; + REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg); +} + +ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( + uptr aligned_addr, uptr size, uptr redzone_size, u8 value) { + DCHECK(flags()->poison_heap); + u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr); + for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) { + if (i + SHADOW_GRANULARITY <= size) { + *shadow = 0; // fully addressable + } else if (i >= size) { + *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable + } else { + *shadow = size - i; // first size-i bytes are addressable + } + } +} + +} // namespace __asan diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc index ceaf120fc803..5126a756d1c8 100644 --- a/lib/asan/asan_posix.cc +++ b/lib/asan/asan_posix.cc @@ -11,14 +11,15 @@ // // Posix-specific details. 
//===----------------------------------------------------------------------===// -#if defined(__linux__) || defined(__APPLE__) + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX || SANITIZER_MAC #include "asan_internal.h" #include "asan_interceptors.h" #include "asan_mapping.h" #include "asan_report.h" #include "asan_stack.h" -#include "asan_thread_registry.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_procmaps.h" @@ -42,7 +43,7 @@ static void MaybeInstallSigaction(int signum, sigact.sa_sigaction = handler; sigact.sa_flags = SA_SIGINFO; if (flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK; - CHECK(0 == REAL(sigaction)(signum, &sigact, 0)); + CHECK_EQ(0, REAL(sigaction)(signum, &sigact, 0)); if (flags()->verbosity >= 1) { Report("Installed the sigaction for signal %d\n", signum); } @@ -59,7 +60,7 @@ static void ASAN_OnSIGSEGV(int, siginfo_t *siginfo, void *context) { void SetAlternateSignalStack() { stack_t altstack, oldstack; - CHECK(0 == sigaltstack(0, &oldstack)); + CHECK_EQ(0, sigaltstack(0, &oldstack)); // If the alternate stack is already in place, do nothing. if ((oldstack.ss_flags & SS_DISABLE) == 0) return; // TODO(glider): the mapped stack should have the MAP_STACK flag in the @@ -69,10 +70,10 @@ void SetAlternateSignalStack() { altstack.ss_sp = base; altstack.ss_flags = 0; altstack.ss_size = kAltStackSize; - CHECK(0 == sigaltstack(&altstack, 0)); + CHECK_EQ(0, sigaltstack(&altstack, 0)); if (flags()->verbosity > 0) { Report("Alternative stack for T%d set: [%p,%p)\n", - asanThreadRegistry().GetCurrentTidOrInvalid(), + GetCurrentTidOrInvalid(), altstack.ss_sp, (char*)altstack.ss_sp + altstack.ss_size); } } @@ -82,7 +83,7 @@ void UnsetAlternateSignalStack() { altstack.ss_sp = 0; altstack.ss_flags = SS_DISABLE; altstack.ss_size = 0; - CHECK(0 == sigaltstack(&altstack, &oldstack)); + CHECK_EQ(0, sigaltstack(&altstack, &oldstack)); UnmapOrDie(oldstack.ss_sp, oldstack.ss_size); } @@ -102,7 +103,7 @@ static bool tsd_key_inited = false; void AsanTSDInit(void (*destructor)(void *tsd)) { CHECK(!tsd_key_inited); tsd_key_inited = true; - CHECK(0 == pthread_key_create(&tsd_key, destructor)); + CHECK_EQ(0, pthread_key_create(&tsd_key, destructor)); } void *AsanTSDGet() { @@ -117,4 +118,4 @@ void AsanTSDSet(void *tsd) { } // namespace __asan -#endif // __linux__ || __APPLE_ +#endif // SANITIZER_LINUX || SANITIZER_MAC diff --git a/lib/asan/asan_preinit.cc b/lib/asan/asan_preinit.cc new file mode 100644 index 000000000000..586f551c23c3 --- /dev/null +++ b/lib/asan/asan_preinit.cc @@ -0,0 +1,31 @@ +//===-- asan_preinit.cc ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Call __asan_init at the very early stage of process startup. +// On Linux we use .preinit_array section (unless PIC macro is defined). +//===----------------------------------------------------------------------===// +#include "asan_internal.h" + +#if ASAN_USE_PREINIT_ARRAY && !defined(PIC) + // On Linux, we force __asan_init to be called before anyone else + // by placing it into .preinit_array section. + // FIXME: do we have anything like this on Mac? 
+ // The symbol is called __local_asan_preinit, because it's not intended to be + // exported. + __attribute__((section(".preinit_array"), used)) + void (*__local_asan_preinit)(void) = __asan_init; +#elif SANITIZER_WINDOWS && defined(_DLL) + // On Windows, when using dynamic CRT (/MD), we can put a pointer + // to __asan_init into the global list of C initializers. + // See crt0dat.c in the CRT sources for the details. + #pragma section(".CRT$XIB", long, read) // NOLINT + __declspec(allocate(".CRT$XIB")) void (*__asan_preinit)() = __asan_init; +#endif diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc index 35ab9cabde67..aeeebf452ca8 100644 --- a/lib/asan/asan_report.cc +++ b/lib/asan/asan_report.cc @@ -17,8 +17,8 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_thread.h" -#include "asan_thread_registry.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_symbolizer.h" @@ -120,19 +120,7 @@ static void PrintShadowBytes(const char *before, u8 *bytes, Printf("\n"); } -static void PrintShadowMemoryForAddress(uptr addr) { - if (!AddrIsInMem(addr)) - return; - uptr shadow_addr = MemToShadow(addr); - const uptr n_bytes_per_row = 16; - uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); - Printf("Shadow bytes around the buggy address:\n"); - for (int i = -5; i <= 5; i++) { - const char *prefix = (i == 0) ? "=>" : " "; - PrintShadowBytes(prefix, - (u8*)(aligned_shadow + i * n_bytes_per_row), - (u8*)shadow_addr, n_bytes_per_row); - } +static void PrintLegend() { Printf("Shadow byte legend (one shadow byte represents %d " "application bytes):\n", (int)SHADOW_GRANULARITY); PrintShadowByte(" Addressable: ", 0); @@ -141,8 +129,8 @@ static void PrintShadowMemoryForAddress(uptr addr) { PrintShadowByte("", i, " "); Printf("\n"); PrintShadowByte(" Heap left redzone: ", kAsanHeapLeftRedzoneMagic); - PrintShadowByte(" Heap righ redzone: ", kAsanHeapRightRedzoneMagic); - PrintShadowByte(" Freed Heap region: ", kAsanHeapFreeMagic); + PrintShadowByte(" Heap right redzone: ", kAsanHeapRightRedzoneMagic); + PrintShadowByte(" Freed heap region: ", kAsanHeapFreeMagic); PrintShadowByte(" Stack left redzone: ", kAsanStackLeftRedzoneMagic); PrintShadowByte(" Stack mid redzone: ", kAsanStackMidRedzoneMagic); PrintShadowByte(" Stack right redzone: ", kAsanStackRightRedzoneMagic); @@ -155,6 +143,23 @@ static void PrintShadowMemoryForAddress(uptr addr) { PrintShadowByte(" ASan internal: ", kAsanInternalHeapMagic); } +static void PrintShadowMemoryForAddress(uptr addr) { + if (!AddrIsInMem(addr)) + return; + uptr shadow_addr = MemToShadow(addr); + const uptr n_bytes_per_row = 16; + uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1); + Printf("Shadow bytes around the buggy address:\n"); + for (int i = -5; i <= 5; i++) { + const char *prefix = (i == 0) ? "=>" : " "; + PrintShadowBytes(prefix, + (u8*)(aligned_shadow + i * n_bytes_per_row), + (u8*)shadow_addr, n_bytes_per_row); + } + if (flags()->print_legend) + PrintLegend(); +} + static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, const char *zone_name) { if (zone_ptr) { @@ -176,30 +181,43 @@ static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; } +static const char *MaybeDemangleGlobalName(const char *name) { + // We can spoil names of globals with C linkage, so use an heuristic + // approach to check if the name should be demangled. 
+ return (name[0] == '_' && name[1] == 'Z') ? Demangle(name) : name; +} + // Check if the global is a zero-terminated ASCII string. If so, print it. static void PrintGlobalNameIfASCII(const __asan_global &g) { for (uptr p = g.beg; p < g.beg + g.size - 1; p++) { - if (!IsASCII(*(unsigned char*)p)) return; + unsigned char c = *(unsigned char*)p; + if (c == '\0' || !IsASCII(c)) return; } - if (*(char*)(g.beg + g.size - 1) != 0) return; - Printf(" '%s' is ascii string '%s'\n", g.name, (char*)g.beg); + if (*(char*)(g.beg + g.size - 1) != '\0') return; + Printf(" '%s' is ascii string '%s'\n", + MaybeDemangleGlobalName(g.name), (char*)g.beg); } -bool DescribeAddressRelativeToGlobal(uptr addr, const __asan_global &g) { - if (addr < g.beg - kGlobalAndStackRedzone) return false; +bool DescribeAddressRelativeToGlobal(uptr addr, uptr size, + const __asan_global &g) { + static const uptr kMinimalDistanceFromAnotherGlobal = 64; + if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false; if (addr >= g.beg + g.size_with_redzone) return false; Decorator d; Printf("%s", d.Location()); - Printf("%p is located ", (void*)addr); if (addr < g.beg) { - Printf("%zd bytes to the left", g.beg - addr); - } else if (addr >= g.beg + g.size) { - Printf("%zd bytes to the right", addr - (g.beg + g.size)); + Printf("%p is located %zd bytes to the left", (void*)addr, g.beg - addr); + } else if (addr + size > g.beg + g.size) { + if (addr < g.beg + g.size) + addr = g.beg + g.size; + Printf("%p is located %zd bytes to the right", (void*)addr, + addr - (g.beg + g.size)); } else { - Printf("%zd bytes inside", addr - g.beg); // Can it happen? + // Can it happen? + Printf("%p is located %zd bytes inside", (void*)addr, addr - g.beg); } - Printf(" of global variable '%s' (0x%zx) of size %zu\n", - g.name, g.beg, g.size); + Printf(" of global variable '%s' from '%s' (0x%zx) of size %zu\n", + MaybeDemangleGlobalName(g.name), g.module_name, g.beg, g.size); Printf("%s", d.EndLocation()); PrintGlobalNameIfASCII(g); return true; @@ -226,34 +244,70 @@ bool DescribeAddressIfShadow(uptr addr) { return false; } +// Return " (thread_name) " or an empty string if the name is empty. +const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[], + uptr buff_len) { + const char *name = t->name; + if (name[0] == '\0') return ""; + buff[0] = 0; + internal_strncat(buff, " (", 3); + internal_strncat(buff, name, buff_len - 4); + internal_strncat(buff, ")", 2); + return buff; +} + +const char *ThreadNameWithParenthesis(u32 tid, char buff[], + uptr buff_len) { + if (tid == kInvalidTid) return ""; + asanThreadRegistry().CheckLocked(); + AsanThreadContext *t = GetThreadContextByTidLocked(tid); + return ThreadNameWithParenthesis(t, buff, buff_len); +} + bool DescribeAddressIfStack(uptr addr, uptr access_size) { - AsanThread *t = asanThreadRegistry().FindThreadByStackAddress(addr); + AsanThread *t = FindThreadByStackAddress(addr); if (!t) return false; const sptr kBufSize = 4095; char buf[kBufSize]; uptr offset = 0; - const char *frame_descr = t->GetFrameNameByAddr(addr, &offset); + uptr frame_pc = 0; + char tname[128]; + const char *frame_descr = t->GetFrameNameByAddr(addr, &offset, &frame_pc); + +#ifdef __powerpc64__ + // On PowerPC64, the address of a function actually points to a + // three-doubleword data structure with the first field containing + // the address of the function's code. 
+ frame_pc = *reinterpret_cast<uptr *>(frame_pc); +#endif + // This string is created by the compiler and has the following form: - // "FunctioName n alloc_1 alloc_2 ... alloc_n" + // "n alloc_1 alloc_2 ... alloc_n" // where alloc_i looks like "offset size len ObjectName ". CHECK(frame_descr); - // Report the function name and the offset. - const char *name_end = internal_strchr(frame_descr, ' '); - CHECK(name_end); - buf[0] = 0; - internal_strncat(buf, frame_descr, - Min(kBufSize, - static_cast<sptr>(name_end - frame_descr))); Decorator d; Printf("%s", d.Location()); - Printf("Address %p is located at offset %zu " - "in frame <%s> of T%d's stack:\n", - (void*)addr, offset, Demangle(buf), t->tid()); + Printf("Address %p is located in stack of thread T%d%s " + "at offset %zu in frame\n", + addr, t->tid(), + ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)), + offset); + // Now we print the frame where the alloca has happened. + // We print this frame as a stack trace with one element. + // The symbolizer may print more than one frame if inlining was involved. + // The frame numbers may be different than those in the stack trace printed + // previously. That's unfortunate, but I have no better solution, + // especially given that the alloca may be from entirely different place + // (e.g. use-after-scope, or different thread's stack). + StackTrace alloca_stack; + alloca_stack.trace[0] = frame_pc + 16; + alloca_stack.size = 1; Printf("%s", d.EndLocation()); + PrintStack(&alloca_stack); // Report the number of stack objects. char *p; - uptr n_objects = internal_simple_strtoll(name_end, &p, 10); - CHECK(n_objects > 0); + uptr n_objects = internal_simple_strtoll(frame_descr, &p, 10); + CHECK_GT(n_objects, 0); Printf(" This frame has %zu object(s):\n", n_objects); // Report all objects in this frame. for (uptr i = 0; i < n_objects; i++) { @@ -276,87 +330,73 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) { Printf("HINT: this may be a false positive if your program uses " "some custom stack unwind mechanism or swapcontext\n" " (longjmp and C++ exceptions *are* supported)\n"); - DescribeThread(t->summary()); + DescribeThread(t->context()); return true; } static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr, uptr access_size) { - uptr offset; + sptr offset; Decorator d; Printf("%s", d.Location()); - Printf("%p is located ", (void*)addr); - if (chunk.AddrIsInside(addr, access_size, &offset)) { - Printf("%zu bytes inside of", offset); - } else if (chunk.AddrIsAtLeft(addr, access_size, &offset)) { - Printf("%zu bytes to the left of", offset); + if (chunk.AddrIsAtLeft(addr, access_size, &offset)) { + Printf("%p is located %zd bytes to the left of", (void*)addr, offset); } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) { - Printf("%zu bytes to the right of", offset); + if (offset < 0) { + addr -= offset; + offset = 0; + } + Printf("%p is located %zd bytes to the right of", (void*)addr, offset); + } else if (chunk.AddrIsInside(addr, access_size, &offset)) { + Printf("%p is located %zd bytes inside of", (void*)addr, offset); } else { - Printf(" somewhere around (this is AddressSanitizer bug!)"); + Printf("%p is located somewhere around (this is AddressSanitizer bug!)", + (void*)addr); } Printf(" %zu-byte region [%p,%p)\n", chunk.UsedSize(), (void*)(chunk.Beg()), (void*)(chunk.End())); Printf("%s", d.EndLocation()); } -// Return " (thread_name) " or an empty string if the name is empty. 
-const char *ThreadNameWithParenthesis(AsanThreadSummary *t, char buff[], - uptr buff_len) { - const char *name = t->name(); - if (*name == 0) return ""; - buff[0] = 0; - internal_strncat(buff, " (", 3); - internal_strncat(buff, name, buff_len - 4); - internal_strncat(buff, ")", 2); - return buff; -} - -const char *ThreadNameWithParenthesis(u32 tid, char buff[], - uptr buff_len) { - if (tid == kInvalidTid) return ""; - AsanThreadSummary *t = asanThreadRegistry().FindByTid(tid); - return ThreadNameWithParenthesis(t, buff, buff_len); -} - void DescribeHeapAddress(uptr addr, uptr access_size) { AsanChunkView chunk = FindHeapChunkByAddress(addr); if (!chunk.IsValid()) return; DescribeAccessToHeapChunk(chunk, addr, access_size); CHECK(chunk.AllocTid() != kInvalidTid); - AsanThreadSummary *alloc_thread = - asanThreadRegistry().FindByTid(chunk.AllocTid()); + asanThreadRegistry().CheckLocked(); + AsanThreadContext *alloc_thread = + GetThreadContextByTidLocked(chunk.AllocTid()); StackTrace alloc_stack; chunk.GetAllocStack(&alloc_stack); - AsanThread *t = asanThreadRegistry().GetCurrent(); + AsanThread *t = GetCurrentThread(); CHECK(t); char tname[128]; Decorator d; if (chunk.FreeTid() != kInvalidTid) { - AsanThreadSummary *free_thread = - asanThreadRegistry().FindByTid(chunk.FreeTid()); + AsanThreadContext *free_thread = + GetThreadContextByTidLocked(chunk.FreeTid()); Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(), - free_thread->tid(), + free_thread->tid, ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)), d.EndAllocation()); StackTrace free_stack; chunk.GetFreeStack(&free_stack); PrintStack(&free_stack); Printf("%spreviously allocated by thread T%d%s here:%s\n", - d.Allocation(), alloc_thread->tid(), + d.Allocation(), alloc_thread->tid, ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)), d.EndAllocation()); PrintStack(&alloc_stack); - DescribeThread(t->summary()); + DescribeThread(t->context()); DescribeThread(free_thread); DescribeThread(alloc_thread); } else { Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(), - alloc_thread->tid(), + alloc_thread->tid, ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)), d.EndAllocation()); PrintStack(&alloc_stack); - DescribeThread(t->summary()); + DescribeThread(t->context()); DescribeThread(alloc_thread); } } @@ -366,7 +406,7 @@ void DescribeAddress(uptr addr, uptr access_size) { if (DescribeAddressIfShadow(addr)) return; CHECK(AddrIsInMem(addr)); - if (DescribeAddressIfGlobal(addr)) + if (DescribeAddressIfGlobal(addr, access_size)) return; if (DescribeAddressIfStack(addr, access_size)) return; @@ -376,26 +416,27 @@ void DescribeAddress(uptr addr, uptr access_size) { // ------------------- Thread description -------------------- {{{1 -void DescribeThread(AsanThreadSummary *summary) { - CHECK(summary); +void DescribeThread(AsanThreadContext *context) { + CHECK(context); + asanThreadRegistry().CheckLocked(); // No need to announce the main thread. 
- if (summary->tid() == 0 || summary->announced()) { + if (context->tid == 0 || context->announced) { return; } - summary->set_announced(true); + context->announced = true; char tname[128]; - Printf("Thread T%d%s", summary->tid(), - ThreadNameWithParenthesis(summary->tid(), tname, sizeof(tname))); + Printf("Thread T%d%s", context->tid, + ThreadNameWithParenthesis(context->tid, tname, sizeof(tname))); Printf(" created by T%d%s here:\n", - summary->parent_tid(), - ThreadNameWithParenthesis(summary->parent_tid(), + context->parent_tid, + ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname))); - PrintStack(summary->stack()); + PrintStack(&context->stack); // Recursively described parent thread if needed. if (flags()->print_full_thread_history) { - AsanThreadSummary *parent_summary = - asanThreadRegistry().FindByTid(summary->parent_tid()); - DescribeThread(parent_summary); + AsanThreadContext *parent_context = + GetThreadContextByTidLocked(context->parent_tid); + DescribeThread(parent_context); } } @@ -414,25 +455,31 @@ class ScopedInErrorReport { // they are defined as no-return. Report("AddressSanitizer: while reporting a bug found another one." "Ignoring.\n"); - u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); + u32 current_tid = GetCurrentTidOrInvalid(); if (current_tid != reporting_thread_tid) { // ASan found two bugs in different threads simultaneously. Sleep // long enough to make sure that the thread which started to print // an error report will finish doing it. SleepForSeconds(Max(100, flags()->sleep_before_dying + 1)); } - // If we're still not dead for some reason, use raw Exit() instead of + // If we're still not dead for some reason, use raw _exit() instead of // Die() to bypass any additional checks. - Exit(flags()->exitcode); + internal__exit(flags()->exitcode); } ASAN_ON_ERROR(); - reporting_thread_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); + // Make sure the registry and sanitizer report mutexes are locked while + // we're printing an error report. + // We can lock them only here to avoid self-deadlock in case of + // recursive reports. + asanThreadRegistry().Lock(); + CommonSanitizerReportMutex.Lock(); + reporting_thread_tid = GetCurrentTidOrInvalid(); Printf("====================================================" "=============\n"); if (reporting_thread_tid != kInvalidTid) { // We started reporting an error message. Stop using the fake stack // in case we call an instrumented function from a symbolizer. - AsanThread *curr_thread = asanThreadRegistry().GetCurrent(); + AsanThread *curr_thread = GetCurrentThread(); CHECK(curr_thread); curr_thread->fake_stack().StopUsingFakeStack(); } @@ -440,12 +487,13 @@ class ScopedInErrorReport { // Destructor is NORETURN, as functions that report errors are. NORETURN ~ScopedInErrorReport() { // Make sure the current thread is announced. - AsanThread *curr_thread = asanThreadRegistry().GetCurrent(); + AsanThread *curr_thread = GetCurrentThread(); if (curr_thread) { - DescribeThread(curr_thread->summary()); + DescribeThread(curr_thread->context()); } // Print memory stats. 
- __asan_print_accumulated_stats(); + if (flags()->print_stats) + __asan_print_accumulated_stats(); if (error_report_callback) { error_report_callback(error_message_buffer); } @@ -454,6 +502,22 @@ class ScopedInErrorReport { } }; +static void ReportSummary(const char *error_type, StackTrace *stack) { + if (!stack->size) return; + if (IsSymbolizerAvailable()) { + AddressInfo ai; + // Currently, we include the first stack frame into the report summary. + // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc). + uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]); + SymbolizeCode(pc, &ai, 1); + ReportErrorSummary(error_type, + StripPathPrefix(ai.file, + common_flags()->strip_path_prefix), + ai.line, ai.function); + } + // FIXME: do we need to print anything at all if there is no symbolizer? +} + void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) { ScopedInErrorReport in_report; Decorator d; @@ -461,32 +525,44 @@ void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr) { Report("ERROR: AddressSanitizer: SEGV on unknown address %p" " (pc %p sp %p bp %p T%d)\n", (void*)addr, (void*)pc, (void*)sp, (void*)bp, - asanThreadRegistry().GetCurrentTidOrInvalid()); + GetCurrentTidOrInvalid()); Printf("%s", d.EndWarning()); Printf("AddressSanitizer can not provide additional info.\n"); GET_STACK_TRACE_FATAL(pc, bp); PrintStack(&stack); + ReportSummary("SEGV", &stack); } void ReportDoubleFree(uptr addr, StackTrace *stack) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); - Report("ERROR: AddressSanitizer: attempting double-free on %p:\n", addr); + char tname[128]; + u32 curr_tid = GetCurrentTidOrInvalid(); + Report("ERROR: AddressSanitizer: attempting double-free on %p in " + "thread T%d%s:\n", + addr, curr_tid, + ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname))); + Printf("%s", d.EndWarning()); PrintStack(stack); DescribeHeapAddress(addr, 1); + ReportSummary("double-free", stack); } void ReportFreeNotMalloced(uptr addr, StackTrace *stack) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); + char tname[128]; + u32 curr_tid = GetCurrentTidOrInvalid(); Report("ERROR: AddressSanitizer: attempting free on address " - "which was not malloc()-ed: %p\n", addr); + "which was not malloc()-ed: %p in thread T%d%s\n", addr, + curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname))); Printf("%s", d.EndWarning()); PrintStack(stack); DescribeHeapAddress(addr, 1); + ReportSummary("bad-free", stack); } void ReportAllocTypeMismatch(uptr addr, StackTrace *stack, @@ -505,6 +581,7 @@ void ReportAllocTypeMismatch(uptr addr, StackTrace *stack, Printf("%s", d.EndWarning()); PrintStack(stack); DescribeHeapAddress(addr, 1); + ReportSummary("alloc-dealloc-mismatch", stack); Report("HINT: if you don't care about these warnings you may set " "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n"); } @@ -519,6 +596,7 @@ void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) { Printf("%s", d.EndWarning()); PrintStack(stack); DescribeHeapAddress(addr, 1); + ReportSummary("bad-malloc_usable_size", stack); } void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) { @@ -531,6 +609,7 @@ void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) { Printf("%s", d.EndWarning()); PrintStack(stack); DescribeHeapAddress(addr, 1); + ReportSummary("bad-__asan_get_allocated_size", stack); } void ReportStringFunctionMemoryRangesOverlap( @@ -538,14 +617,17 @@ void ReportStringFunctionMemoryRangesOverlap( const char *offset2, 
uptr length2, StackTrace *stack) { ScopedInErrorReport in_report; Decorator d; + char bug_type[100]; + internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); Printf("%s", d.Warning()); - Report("ERROR: AddressSanitizer: %s-param-overlap: " + Report("ERROR: AddressSanitizer: %s: " "memory ranges [%p,%p) and [%p, %p) overlap\n", \ - function, offset1, offset1 + length1, offset2, offset2 + length2); + bug_type, offset1, offset1 + length1, offset2, offset2 + length2); Printf("%s", d.EndWarning()); PrintStack(stack); DescribeAddress((uptr)offset1, length1); DescribeAddress((uptr)offset2, length2); + ReportSummary(bug_type, stack); } // ----------------------- Mac-specific reports ----------------- {{{1 @@ -642,7 +724,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, bug_descr, (void*)addr, pc, bp, sp); Printf("%s", d.EndWarning()); - u32 curr_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); + u32 curr_tid = GetCurrentTidOrInvalid(); char tname[128]; Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(), @@ -655,7 +737,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, PrintStack(&stack); DescribeAddress(addr, access_size); - + ReportSummary(bug_descr, &stack); PrintShadowMemoryForAddress(addr); } diff --git a/lib/asan/asan_report.h b/lib/asan/asan_report.h index f0617f91970e..db271fc10e97 100644 --- a/lib/asan/asan_report.h +++ b/lib/asan/asan_report.h @@ -15,21 +15,21 @@ #include "asan_allocator.h" #include "asan_internal.h" #include "asan_thread.h" -#include "sanitizer/asan_interface.h" namespace __asan { // The following functions prints address description depending // on the memory type (shadow/heap/stack/global). void DescribeHeapAddress(uptr addr, uptr access_size); -bool DescribeAddressIfGlobal(uptr addr); -bool DescribeAddressRelativeToGlobal(uptr addr, const __asan_global &g); +bool DescribeAddressIfGlobal(uptr addr, uptr access_size); +bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, + const __asan_global &g); bool DescribeAddressIfShadow(uptr addr); bool DescribeAddressIfStack(uptr addr, uptr access_size); // Determines memory type on its own. void DescribeAddress(uptr addr, uptr access_size); -void DescribeThread(AsanThreadSummary *summary); +void DescribeThread(AsanThreadContext *context); // Different kinds of error reports. 
void NORETURN ReportSIGSEGV(uptr pc, uptr sp, uptr bp, uptr addr); diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc index 11adbee5bdea..f989c5c0d2a5 100644 --- a/lib/asan/asan_rtl.cc +++ b/lib/asan/asan_rtl.cc @@ -15,19 +15,21 @@ #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_mapping.h" +#include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" #include "asan_thread.h" -#include "asan_thread_registry.h" -#include "sanitizer/asan_interface.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_symbolizer.h" +#include "lsan/lsan_common.h" namespace __asan { +uptr AsanMappingProfile[kAsanMappingProfileSize]; + static void AsanDie() { static atomic_uint32_t num_calls; if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) { @@ -38,13 +40,19 @@ static void AsanDie() { Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying); SleepForSeconds(flags()->sleep_before_dying); } - if (flags()->unmap_shadow_on_exit) - UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); + if (flags()->unmap_shadow_on_exit) { + if (kMidMemBeg) { + UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg); + UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd); + } else { + UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); + } + } if (death_callback) death_callback(); if (flags()->abort_on_error) Abort(); - Exit(flags()->exitcode); + internal__exit(flags()->exitcode); } static void AsanCheckFailed(const char *file, int line, const char *cond, @@ -57,97 +65,118 @@ static void AsanCheckFailed(const char *file, int line, const char *cond, } // -------------------------- Flags ------------------------- {{{1 -static const int kDeafultMallocContextSize = 30; - -static Flags asan_flags; +static const int kDefaultMallocContextSize = 30; -Flags *flags() { - return &asan_flags; -} +Flags asan_flags_dont_use_directly; // use via flags(). static const char *MaybeCallAsanDefaultOptions() { return (&__asan_default_options) ? __asan_default_options() : ""; } +static const char *MaybeUseAsanDefaultOptionsCompileDefiniton() { +#ifdef ASAN_DEFAULT_OPTIONS +// Stringize the macro value. 
+# define ASAN_STRINGIZE(x) #x +# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options) + return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + static void ParseFlagsFromString(Flags *f, const char *str) { + ParseCommonFlagsFromString(str); + CHECK((uptr)common_flags()->malloc_context_size <= kStackTraceMax); + ParseFlag(str, &f->quarantine_size, "quarantine_size"); - ParseFlag(str, &f->symbolize, "symbolize"); ParseFlag(str, &f->verbosity, "verbosity"); ParseFlag(str, &f->redzone, "redzone"); - CHECK(f->redzone >= 16); + CHECK_GE(f->redzone, 16); CHECK(IsPowerOfTwo(f->redzone)); ParseFlag(str, &f->debug, "debug"); ParseFlag(str, &f->report_globals, "report_globals"); - ParseFlag(str, &f->check_initialization_order, "initialization_order"); - ParseFlag(str, &f->malloc_context_size, "malloc_context_size"); - CHECK((uptr)f->malloc_context_size <= kStackTraceMax); + ParseFlag(str, &f->check_initialization_order, "check_initialization_order"); ParseFlag(str, &f->replace_str, "replace_str"); ParseFlag(str, &f->replace_intrin, "replace_intrin"); - ParseFlag(str, &f->replace_cfallocator, "replace_cfallocator"); ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free"); ParseFlag(str, &f->use_fake_stack, "use_fake_stack"); ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size"); + ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte"); ParseFlag(str, &f->exitcode, "exitcode"); ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning"); ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying"); ParseFlag(str, &f->handle_segv, "handle_segv"); + ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler"); ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack"); ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size"); ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit"); ParseFlag(str, &f->abort_on_error, "abort_on_error"); + ParseFlag(str, &f->print_stats, "print_stats"); + ParseFlag(str, &f->print_legend, "print_legend"); ParseFlag(str, &f->atexit, "atexit"); ParseFlag(str, &f->disable_core, "disable_core"); - ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix"); ParseFlag(str, &f->allow_reexec, "allow_reexec"); ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history"); ParseFlag(str, &f->log_path, "log_path"); - ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal"); - ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc"); ParseFlag(str, &f->poison_heap, "poison_heap"); ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch"); ParseFlag(str, &f->use_stack_depot, "use_stack_depot"); + ParseFlag(str, &f->strict_memcmp, "strict_memcmp"); + ParseFlag(str, &f->strict_init_order, "strict_init_order"); + ParseFlag(str, &f->detect_leaks, "detect_leaks"); } void InitializeFlags(Flags *f, const char *env) { - internal_memset(f, 0, sizeof(*f)); + CommonFlags *cf = common_flags(); + cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); + cf->symbolize = true; + cf->malloc_context_size = kDefaultMallocContextSize; + cf->fast_unwind_on_fatal = false; + cf->fast_unwind_on_malloc = true; + cf->strip_path_prefix = ""; + internal_memset(f, 0, sizeof(*f)); f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28; - f->symbolize = false; f->verbosity = 0; - f->redzone = ASAN_ALLOCATOR_VERSION == 2 ? 16 : (ASAN_LOW_MEMORY) ? 
64 : 128; + f->redzone = 16; f->debug = false; f->report_globals = 1; - f->check_initialization_order = true; - f->malloc_context_size = kDeafultMallocContextSize; + f->check_initialization_order = false; f->replace_str = true; f->replace_intrin = true; - f->replace_cfallocator = true; f->mac_ignore_invalid_free = false; f->use_fake_stack = true; - f->max_malloc_fill_size = 0; + f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K. + f->malloc_fill_byte = 0xbe; f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE; f->allow_user_poisoning = true; f->sleep_before_dying = 0; f->handle_segv = ASAN_NEEDS_SEGV; + f->allow_user_segv_handler = false; f->use_sigaltstack = false; f->check_malloc_usable_size = true; f->unmap_shadow_on_exit = false; f->abort_on_error = false; + f->print_stats = false; + f->print_legend = true; f->atexit = false; f->disable_core = (SANITIZER_WORDSIZE == 64); - f->strip_path_prefix = ""; f->allow_reexec = true; f->print_full_thread_history = true; f->log_path = 0; - f->fast_unwind_on_fatal = false; - f->fast_unwind_on_malloc = true; f->poison_heap = true; // Turn off alloc/dealloc mismatch checker on Mac for now. // TODO(glider): Fix known issues and enable this back. - f->alloc_dealloc_mismatch = (ASAN_MAC == 0); - f->use_stack_depot = true; // Only affects allocator2. + f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0);; + f->use_stack_depot = true; + f->strict_memcmp = true; + f->strict_init_order = false; + f->detect_leaks = false; + + // Override from compile definition. + ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefiniton()); // Override from user-specified string. ParseFlagsFromString(f, MaybeCallAsanDefaultOptions()); @@ -158,6 +187,20 @@ void InitializeFlags(Flags *f, const char *env) { // Override from command line. ParseFlagsFromString(f, env); + +#if !CAN_SANITIZE_LEAKS + if (f->detect_leaks) { + Report("%s: detect_leaks is not supported on this platform.\n", + SanitizerToolName); + f->detect_leaks = false; + } +#endif + + if (f->detect_leaks && !f->use_stack_depot) { + Report("%s: detect_leaks is ignored (requires use_stack_depot).\n", + SanitizerToolName); + f->detect_leaks = false; + } } // -------------------------- Globals --------------------- {{{1 @@ -165,6 +208,10 @@ int asan_inited; bool asan_init_is_running; void (*death_callback)(void); +#if !ASAN_FIXED_MAPPING +uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; +#endif + // -------------------------- Misc ---------------- {{{1 void ShowStatsAndAbort() { __asan_print_accumulated_stats(); @@ -174,8 +221,8 @@ void ShowStatsAndAbort() { // ---------------------- mmap -------------------- {{{1 // Reserve memory range [beg, end]. 
static void ReserveShadowMemoryRange(uptr beg, uptr end) { - CHECK((beg % GetPageSizeCached()) == 0); - CHECK(((end + 1) % GetPageSizeCached()) == 0); + CHECK_EQ((beg % GetPageSizeCached()), 0); + CHECK_EQ(((end + 1) % GetPageSizeCached()), 0); uptr size = end - beg + 1; void *res = MmapFixedNoReserve(beg, size); if (res != (void*)beg) { @@ -211,6 +258,17 @@ ASAN_REPORT_ERROR(store, true, 4) ASAN_REPORT_ERROR(store, true, 8) ASAN_REPORT_ERROR(store, true, 16) +#define ASAN_REPORT_ERROR_N(type, is_write) \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## _n(uptr addr, uptr size); \ +void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size); \ +} + +ASAN_REPORT_ERROR_N(load, false) +ASAN_REPORT_ERROR_N(store, true) + // Force the linker to keep the symbols for various ASan interface functions. // We want to keep those in the executable in order to let the instrumented // dynamic libraries access the symbol even if it is not used by the executable @@ -249,7 +307,7 @@ static NOINLINE void force_interface_symbols() { case 27: __asan_set_error_exit_code(0); break; case 28: __asan_stack_free(0, 0, 0); break; case 29: __asan_stack_malloc(0, 0); break; - case 30: __asan_before_dynamic_init(0, 0); break; + case 30: __asan_before_dynamic_init(0); break; case 31: __asan_after_dynamic_init(); break; case 32: __asan_poison_stack_memory(0, 0); break; case 33: __asan_unpoison_stack_memory(0, 0); break; @@ -261,6 +319,83 @@ static NOINLINE void force_interface_symbols() { static void asan_atexit() { Printf("AddressSanitizer exit stats:\n"); __asan_print_accumulated_stats(); + // Print AsanMappingProfile. + for (uptr i = 0; i < kAsanMappingProfileSize; i++) { + if (AsanMappingProfile[i] == 0) continue; + Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]); + } +} + +static void InitializeHighMemEnd() { +#if !ASAN_FIXED_MAPPING +#if SANITIZER_WORDSIZE == 64 +# if defined(__powerpc64__) + // FIXME: + // On PowerPC64 we have two different address space layouts: 44- and 46-bit. + // We somehow need to figure our which one we are using now and choose + // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL. + // Note that with 'ulimit -s unlimited' the stack is moved away from the top + // of the address space, so simply checking the stack address is not enough. 
+ kHighMemEnd = (1ULL << 44) - 1; // 0x00000fffffffffffUL +# else + kHighMemEnd = (1ULL << 47) - 1; // 0x00007fffffffffffUL; +# endif +#else // SANITIZER_WORDSIZE == 32 + kHighMemEnd = (1ULL << 32) - 1; // 0xffffffff; +#endif // SANITIZER_WORDSIZE +#endif // !ASAN_FIXED_MAPPING +} + +static void ProtectGap(uptr a, uptr size) { + CHECK_EQ(a, (uptr)Mprotect(a, size)); +} + +static void PrintAddressSpaceLayout() { + Printf("|| `[%p, %p]` || HighMem ||\n", + (void*)kHighMemBeg, (void*)kHighMemEnd); + Printf("|| `[%p, %p]` || HighShadow ||\n", + (void*)kHighShadowBeg, (void*)kHighShadowEnd); + if (kMidMemBeg) { + Printf("|| `[%p, %p]` || ShadowGap3 ||\n", + (void*)kShadowGap3Beg, (void*)kShadowGap3End); + Printf("|| `[%p, %p]` || MidMem ||\n", + (void*)kMidMemBeg, (void*)kMidMemEnd); + Printf("|| `[%p, %p]` || ShadowGap2 ||\n", + (void*)kShadowGap2Beg, (void*)kShadowGap2End); + Printf("|| `[%p, %p]` || MidShadow ||\n", + (void*)kMidShadowBeg, (void*)kMidShadowEnd); + } + Printf("|| `[%p, %p]` || ShadowGap ||\n", + (void*)kShadowGapBeg, (void*)kShadowGapEnd); + if (kLowShadowBeg) { + Printf("|| `[%p, %p]` || LowShadow ||\n", + (void*)kLowShadowBeg, (void*)kLowShadowEnd); + Printf("|| `[%p, %p]` || LowMem ||\n", + (void*)kLowMemBeg, (void*)kLowMemEnd); + } + Printf("MemToShadow(shadow): %p %p %p %p", + (void*)MEM_TO_SHADOW(kLowShadowBeg), + (void*)MEM_TO_SHADOW(kLowShadowEnd), + (void*)MEM_TO_SHADOW(kHighShadowBeg), + (void*)MEM_TO_SHADOW(kHighShadowEnd)); + if (kMidMemBeg) { + Printf(" %p %p", + (void*)MEM_TO_SHADOW(kMidShadowBeg), + (void*)MEM_TO_SHADOW(kMidShadowEnd)); + } + Printf("\n"); + Printf("red_zone=%zu\n", (uptr)flags()->redzone); + Printf("malloc_context_size=%zu\n", + (uptr)common_flags()->malloc_context_size); + + Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE); + Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY); + Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET); + CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); + if (kMidMemBeg) + CHECK(kMidShadowBeg > kLowShadowEnd && + kMidMemBeg > kMidShadowEnd && + kHighShadowBeg > kMidMemEnd); } } // namespace __asan @@ -283,11 +418,25 @@ int NOINLINE __asan_set_error_exit_code(int exit_code) { void NOINLINE __asan_handle_no_return() { int local_stack; - AsanThread *curr_thread = asanThreadRegistry().GetCurrent(); + AsanThread *curr_thread = GetCurrentThread(); CHECK(curr_thread); uptr PageSize = GetPageSizeCached(); uptr top = curr_thread->stack_top(); uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1); + static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M + if (top - bottom > kMaxExpectedCleanupSize) { + static bool reported_warning = false; + if (reported_warning) + return; + reported_warning = true; + Report("WARNING: ASan is ignoring requested __asan_handle_no_return: " + "stack top: %p; bottom %p; size: %p (%zd)\n" + "False positive error reports may follow\n" + "For details see " + "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n", + top, bottom, top - bottom, top - bottom); + return; + } PoisonShadow(bottom, top - bottom, 0); } @@ -297,8 +446,10 @@ void NOINLINE __asan_set_death_callback(void (*callback)(void)) { void __asan_init() { if (asan_inited) return; + SanitizerToolName = "AddressSanitizer"; CHECK(!asan_init_is_running && "ASan init calls itself!"); asan_init_is_running = true; + InitializeHighMemEnd(); // Make sure we are not statically linked. 
AsanDoesNotSupportStaticLinkage(); @@ -334,49 +485,48 @@ void __asan_init() { ReplaceSystemMalloc(); ReplaceOperatorsNewAndDelete(); - if (flags()->verbosity) { - Printf("|| `[%p, %p]` || HighMem ||\n", - (void*)kHighMemBeg, (void*)kHighMemEnd); - Printf("|| `[%p, %p]` || HighShadow ||\n", - (void*)kHighShadowBeg, (void*)kHighShadowEnd); - Printf("|| `[%p, %p]` || ShadowGap ||\n", - (void*)kShadowGapBeg, (void*)kShadowGapEnd); - Printf("|| `[%p, %p]` || LowShadow ||\n", - (void*)kLowShadowBeg, (void*)kLowShadowEnd); - Printf("|| `[%p, %p]` || LowMem ||\n", - (void*)kLowMemBeg, (void*)kLowMemEnd); - Printf("MemToShadow(shadow): %p %p %p %p\n", - (void*)MEM_TO_SHADOW(kLowShadowBeg), - (void*)MEM_TO_SHADOW(kLowShadowEnd), - (void*)MEM_TO_SHADOW(kHighShadowBeg), - (void*)MEM_TO_SHADOW(kHighShadowEnd)); - Printf("red_zone=%zu\n", (uptr)flags()->redzone); - Printf("malloc_context_size=%zu\n", (uptr)flags()->malloc_context_size); - - Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE); - Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY); - Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET); - CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); + uptr shadow_start = kLowShadowBeg; + if (kLowShadowBeg) shadow_start -= GetMmapGranularity(); + uptr shadow_end = kHighShadowEnd; + bool full_shadow_is_available = + MemoryRangeIsAvailable(shadow_start, shadow_end); + +#if SANITIZER_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING + if (!full_shadow_is_available) { + kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0; + kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0; } +#endif + + if (flags()->verbosity) + PrintAddressSpaceLayout(); if (flags()->disable_core) { DisableCoreDumper(); } - uptr shadow_start = kLowShadowBeg; - if (kLowShadowBeg > 0) shadow_start -= GetMmapGranularity(); - uptr shadow_end = kHighShadowEnd; - if (MemoryRangeIsAvailable(shadow_start, shadow_end)) { - if (kLowShadowBeg != kLowShadowEnd) { - // mmap the low shadow plus at least one page. - ReserveShadowMemoryRange(kLowShadowBeg - GetMmapGranularity(), - kLowShadowEnd); - } + if (full_shadow_is_available) { + // mmap the low shadow plus at least one page at the left. + if (kLowShadowBeg) + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + // mmap the high shadow. + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); + // protect the gap. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + } else if (kMidMemBeg && + MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) && + MemoryRangeIsAvailable(kMidMemEnd + 1, shadow_end)) { + CHECK(kLowShadowBeg != kLowShadowEnd); + // mmap the low shadow plus at least one page at the left. + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + // mmap the mid shadow. + ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd); // mmap the high shadow. ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); - // protect the gap - void *prot = Mprotect(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); - CHECK(prot == (void*)kShadowGapBeg); + // protect the gaps. + ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); + ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); + ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1); } else { Report("Shadow memory range interleaves with an existing memory mapping. " "ASan cannot proceed correctly. ABORTING.\n"); @@ -386,11 +536,10 @@ void __asan_init() { InstallSignalHandlers(); // Start symbolizer process if necessary. 
- if (flags()->symbolize) { - const char *external_symbolizer = GetEnv("ASAN_SYMBOLIZER_PATH"); - if (external_symbolizer) { - InitializeExternalSymbolizer(external_symbolizer); - } + const char* external_symbolizer = common_flags()->external_symbolizer_path; + if (common_flags()->symbolize && external_symbolizer && + external_symbolizer[0]) { + InitializeExternalSymbolizer(external_symbolizer); } // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited @@ -398,25 +547,27 @@ void __asan_init() { asan_inited = 1; asan_init_is_running = false; - asanThreadRegistry().Init(); - asanThreadRegistry().GetMain()->ThreadStart(); + // Create main thread. + AsanTSDInit(AsanThread::TSDDtor); + AsanThread *main_thread = AsanThread::Create(0, 0); + CreateThreadContextArgs create_main_args = { main_thread, 0 }; + u32 main_tid = asanThreadRegistry().CreateThread( + 0, true, 0, &create_main_args); + CHECK_EQ(0, main_tid); + SetCurrentThread(main_thread); + main_thread->ThreadStart(internal_getpid()); force_interface_symbols(); // no-op. + InitializeAllocator(); + +#if CAN_SANITIZE_LEAKS + __lsan::InitCommonLsan(); + if (flags()->detect_leaks) { + Atexit(__lsan::DoLeakCheck); + } +#endif // CAN_SANITIZE_LEAKS + if (flags()->verbosity) { Report("AddressSanitizer Init done\n"); } } - -#if defined(ASAN_USE_PREINIT_ARRAY) - // On Linux, we force __asan_init to be called before anyone else - // by placing it into .preinit_array section. - // FIXME: do we have anything like this on Mac? - __attribute__((section(".preinit_array"))) - typeof(__asan_init) *__asan_preinit =__asan_init; -#elif defined(_WIN32) && defined(_DLL) - // On Windows, when using dynamic CRT (/MD), we can put a pointer - // to __asan_init into the global list of C initializers. - // See crt0dat.c in the CRT sources for the details. - #pragma section(".CRT$XIB", long, read) // NOLINT - __declspec(allocate(".CRT$XIB")) void (*__asan_preinit)() = __asan_init; -#endif diff --git a/lib/asan/asan_stack.cc b/lib/asan/asan_stack.cc index ebf22fd34ca1..21dae7df096a 100644 --- a/lib/asan/asan_stack.cc +++ b/lib/asan/asan_stack.cc @@ -11,9 +11,10 @@ // // Code for ASan stack trace. //===----------------------------------------------------------------------===// +#include "asan_internal.h" #include "asan_flags.h" #include "asan_stack.h" -#include "sanitizer/asan_interface.h" +#include "sanitizer_common/sanitizer_flags.h" namespace __asan { @@ -24,8 +25,8 @@ static bool MaybeCallAsanSymbolize(const void *pc, char *out_buffer, } void PrintStack(StackTrace *stack) { - stack->PrintStack(stack->trace, stack->size, flags()->symbolize, - flags()->strip_path_prefix, MaybeCallAsanSymbolize); + stack->PrintStack(stack->trace, stack->size, common_flags()->symbolize, + common_flags()->strip_path_prefix, MaybeCallAsanSymbolize); } } // namespace __asan @@ -35,7 +36,7 @@ void PrintStack(StackTrace *stack) { // Provide default implementation of __asan_symbolize that does nothing // and may be overriden by user if he wants to use his own symbolization. // ASan on Windows has its own implementation of this. 
-#if !defined(_WIN32) && !SANITIZER_SUPPORTS_WEAK_HOOKS +#if !SANITIZER_WINDOWS && !SANITIZER_SUPPORTS_WEAK_HOOKS SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE NOINLINE bool __asan_symbolize(const void *pc, char *out_buffer, int out_size) { return false; diff --git a/lib/asan/asan_stack.h b/lib/asan/asan_stack.h index 46c9f3408725..176aa183c93b 100644 --- a/lib/asan/asan_stack.h +++ b/lib/asan/asan_stack.h @@ -14,12 +14,13 @@ #ifndef ASAN_STACK_H #define ASAN_STACK_H -#include "sanitizer_common/sanitizer_stacktrace.h" #include "asan_flags.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_stacktrace.h" namespace __asan { -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast); void PrintStack(StackTrace *stack); } // namespace __asan @@ -27,10 +28,24 @@ void PrintStack(StackTrace *stack); // Get the stack trace with the given pc and bp. // The pc will be in the position 0 of the resulting stack trace. // The bp may refer to the current frame or to the caller's frame. -// fast_unwind is currently unused. +#if SANITIZER_WINDOWS #define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \ StackTrace stack; \ - GetStackTrace(&stack, max_s, pc, bp, fast) + GetStackTrace(&stack, max_s, pc, bp, 0, 0, fast) +#else +#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \ + StackTrace stack; \ + { \ + uptr stack_top = 0, stack_bottom = 0; \ + AsanThread *t; \ + if (asan_inited && (t = GetCurrentThread())) { \ + stack_top = t->stack_top(); \ + stack_bottom = t->stack_bottom(); \ + } \ + GetStackTrace(&stack, max_s, pc, bp, \ + stack_top, stack_bottom, fast); \ + } +#endif // SANITIZER_WINDOWS // NOTE: A Rule of thumb is to retrieve stack trace in the interceptors // as early as possible (in functions exposed to the user), as we generally @@ -42,24 +57,24 @@ void PrintStack(StackTrace *stack); #define GET_STACK_TRACE_FATAL(pc, bp) \ GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \ - flags()->fast_unwind_on_fatal) + common_flags()->fast_unwind_on_fatal) -#define GET_STACK_TRACE_FATAL_HERE \ - GET_STACK_TRACE(kStackTraceMax, flags()->fast_unwind_on_fatal) +#define GET_STACK_TRACE_FATAL_HERE \ + GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal) -#define GET_STACK_TRACE_THREAD \ +#define GET_STACK_TRACE_THREAD \ GET_STACK_TRACE(kStackTraceMax, true) -#define GET_STACK_TRACE_MALLOC \ - GET_STACK_TRACE(flags()->malloc_context_size, \ - flags()->fast_unwind_on_malloc) +#define GET_STACK_TRACE_MALLOC \ + GET_STACK_TRACE(common_flags()->malloc_context_size, \ + common_flags()->fast_unwind_on_malloc) #define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC #define PRINT_CURRENT_STACK() \ { \ GET_STACK_TRACE(kStackTraceMax, \ - flags()->fast_unwind_on_fatal); \ + common_flags()->fast_unwind_on_fatal); \ PrintStack(&stack); \ } diff --git a/lib/asan/asan_stats.cc b/lib/asan/asan_stats.cc index c57c8cc61aed..ba7c1ab6e91a 100644 --- a/lib/asan/asan_stats.cc +++ b/lib/asan/asan_stats.cc @@ -14,14 +14,14 @@ #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_stats.h" -#include "asan_thread_registry.h" -#include "sanitizer/asan_interface.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_mutex.h" #include "sanitizer_common/sanitizer_stackdepot.h" namespace __asan { AsanStats::AsanStats() { - CHECK(REAL(memset) != 0); + CHECK(REAL(memset)); REAL(memset)(this, 0, sizeof(AsanStats)); } @@ -58,7 +58,7 @@ static BlockingMutex print_lock(LINKER_INITIALIZED); 
static void PrintAccumulatedStats() { AsanStats stats; - asanThreadRegistry().GetAccumulatedStats(&stats); + GetAccumulatedStats(&stats); // Use lock to keep reports from mixing up. BlockingMutexLock lock(&print_lock); stats.Print(); @@ -68,21 +68,103 @@ static void PrintAccumulatedStats() { PrintInternalAllocatorStats(); } +static AsanStats unknown_thread_stats(LINKER_INITIALIZED); +static AsanStats accumulated_stats(LINKER_INITIALIZED); +// Required for malloc_zone_statistics() on OS X. This can't be stored in +// per-thread AsanStats. +static uptr max_malloced_memory; +static BlockingMutex acc_stats_lock(LINKER_INITIALIZED); + +static void FlushToAccumulatedStatsUnlocked(AsanStats *stats) { + acc_stats_lock.CheckLocked(); + uptr *dst = (uptr*)&accumulated_stats; + uptr *src = (uptr*)stats; + uptr num_fields = sizeof(*stats) / sizeof(uptr); + for (uptr i = 0; i < num_fields; i++) { + dst[i] += src[i]; + src[i] = 0; + } +} + +static void FlushThreadStats(ThreadContextBase *tctx_base, void *arg) { + AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base); + if (AsanThread *t = tctx->thread) + FlushToAccumulatedStatsUnlocked(&t->stats()); +} + +static void UpdateAccumulatedStatsUnlocked() { + acc_stats_lock.CheckLocked(); + { + ThreadRegistryLock l(&asanThreadRegistry()); + asanThreadRegistry().RunCallbackForEachThreadLocked(FlushThreadStats, 0); + } + FlushToAccumulatedStatsUnlocked(&unknown_thread_stats); + // This is not very accurate: we may miss allocation peaks that happen + // between two updates of accumulated_stats_. For more accurate bookkeeping + // the maximum should be updated on every malloc(), which is unacceptable. + if (max_malloced_memory < accumulated_stats.malloced) { + max_malloced_memory = accumulated_stats.malloced; + } +} + +void FlushToAccumulatedStats(AsanStats *stats) { + BlockingMutexLock lock(&acc_stats_lock); + FlushToAccumulatedStatsUnlocked(stats); +} + +void GetAccumulatedStats(AsanStats *stats) { + BlockingMutexLock lock(&acc_stats_lock); + UpdateAccumulatedStatsUnlocked(); + internal_memcpy(stats, &accumulated_stats, sizeof(accumulated_stats)); +} + +void FillMallocStatistics(AsanMallocStats *malloc_stats) { + BlockingMutexLock lock(&acc_stats_lock); + UpdateAccumulatedStatsUnlocked(); + malloc_stats->blocks_in_use = accumulated_stats.mallocs; + malloc_stats->size_in_use = accumulated_stats.malloced; + malloc_stats->max_size_in_use = max_malloced_memory; + malloc_stats->size_allocated = accumulated_stats.mmaped; +} + +AsanStats &GetCurrentThreadStats() { + AsanThread *t = GetCurrentThread(); + return (t) ? t->stats() : unknown_thread_stats; +} + } // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT uptr __asan_get_current_allocated_bytes() { - return asanThreadRegistry().GetCurrentAllocatedBytes(); + BlockingMutexLock lock(&acc_stats_lock); + UpdateAccumulatedStatsUnlocked(); + uptr malloced = accumulated_stats.malloced; + uptr freed = accumulated_stats.freed; + // Return sane value if malloced < freed due to racy + // way we update accumulated stats. + return (malloced > freed) ? 
malloced - freed : 1; } uptr __asan_get_heap_size() { - return asanThreadRegistry().GetHeapSize(); + BlockingMutexLock lock(&acc_stats_lock); + UpdateAccumulatedStatsUnlocked(); + return accumulated_stats.mmaped - accumulated_stats.munmaped; } uptr __asan_get_free_bytes() { - return asanThreadRegistry().GetFreeBytes(); + BlockingMutexLock lock(&acc_stats_lock); + UpdateAccumulatedStatsUnlocked(); + uptr total_free = accumulated_stats.mmaped + - accumulated_stats.munmaped + + accumulated_stats.really_freed + + accumulated_stats.really_freed_redzones; + uptr total_used = accumulated_stats.malloced + + accumulated_stats.malloced_redzones; + // Return sane value if total_free < total_used due to racy + // way we update accumulated stats. + return (total_free > total_used) ? total_free - total_used : 1; } uptr __asan_get_unmapped_bytes() { diff --git a/lib/asan/asan_stats.h b/lib/asan/asan_stats.h index 37846bc92ad2..68495fb33f95 100644 --- a/lib/asan/asan_stats.h +++ b/lib/asan/asan_stats.h @@ -56,6 +56,15 @@ struct AsanStats { void Print(); }; +// Returns stats for GetCurrentThread(), or stats for fake "unknown thread" +// if GetCurrentThread() returns 0. +AsanStats &GetCurrentThreadStats(); +// Flushes all thread-local stats to accumulated stats, and makes +// a copy of accumulated stats. +void GetAccumulatedStats(AsanStats *stats); +// Flushes a given stats into accumulated stats. +void FlushToAccumulatedStats(AsanStats *stats); + // A cross-platform equivalent of malloc_statistics_t on Mac OS. struct AsanMallocStats { uptr blocks_in_use; @@ -64,6 +73,8 @@ struct AsanMallocStats { uptr size_allocated; }; +void FillMallocStatistics(AsanMallocStats *malloc_stats); + } // namespace __asan #endif // ASAN_STATS_H diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc index 778e91932ed5..da28381031a5 100644 --- a/lib/asan/asan_thread.cc +++ b/lib/asan/asan_thread.cc @@ -13,46 +13,81 @@ //===----------------------------------------------------------------------===// #include "asan_allocator.h" #include "asan_interceptors.h" +#include "asan_poisoning.h" #include "asan_stack.h" #include "asan_thread.h" -#include "asan_thread_registry.h" #include "asan_mapping.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "lsan/lsan_common.h" namespace __asan { -AsanThread::AsanThread(LinkerInitialized x) - : fake_stack_(x), - malloc_storage_(x), - stats_(x) { } +// AsanThreadContext implementation. -AsanThread *AsanThread::Create(u32 parent_tid, thread_callback_t start_routine, - void *arg, StackTrace *stack) { +void AsanThreadContext::OnCreated(void *arg) { + CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg); + if (args->stack) { + internal_memcpy(&stack, args->stack, sizeof(stack)); + } + thread = args->thread; + thread->set_context(this); +} + +void AsanThreadContext::OnFinished() { + // Drop the link to the AsanThread object. + thread = 0; +} + +static char thread_registry_placeholder[sizeof(ThreadRegistry)]; +static ThreadRegistry *asan_thread_registry; + +static ThreadContextBase *GetAsanThreadContext(u32 tid) { + void *mem = MmapOrDie(sizeof(AsanThreadContext), "AsanThreadContext"); + return new(mem) AsanThreadContext(tid); +} + +ThreadRegistry &asanThreadRegistry() { + static bool initialized; + // Don't worry about thread_safety - this should be called when there is + // a single thread. 
+ if (!initialized) { + // Never reuse ASan threads: we store pointer to AsanThreadContext + // in TSD and can't reliably tell when no more TSD destructors will + // be called. It would be wrong to reuse AsanThreadContext for another + // thread before all TSD destructors will be called for it. + asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry( + GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads); + initialized = true; + } + return *asan_thread_registry; +} + +AsanThreadContext *GetThreadContextByTidLocked(u32 tid) { + return static_cast<AsanThreadContext *>( + asanThreadRegistry().GetThreadLocked(tid)); +} + +// AsanThread implementation. + +AsanThread *AsanThread::Create(thread_callback_t start_routine, + void *arg) { uptr PageSize = GetPageSizeCached(); uptr size = RoundUpTo(sizeof(AsanThread), PageSize); AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__); thread->start_routine_ = start_routine; thread->arg_ = arg; - - const uptr kSummaryAllocSize = PageSize; - CHECK_LE(sizeof(AsanThreadSummary), kSummaryAllocSize); - AsanThreadSummary *summary = - (AsanThreadSummary*)MmapOrDie(PageSize, "AsanThreadSummary"); - summary->Init(parent_tid, stack); - summary->set_thread(thread); - thread->set_summary(summary); + thread->context_ = 0; return thread; } -void AsanThreadSummary::TSDDtor(void *tsd) { - AsanThreadSummary *summary = (AsanThreadSummary*)tsd; - if (flags()->verbosity >= 1) { - Report("T%d TSDDtor\n", summary->tid()); - } - if (summary->thread()) { - summary->thread()->Destroy(); - } +void AsanThread::TSDDtor(void *tsd) { + AsanThreadContext *context = (AsanThreadContext*)tsd; + if (flags()->verbosity >= 1) + Report("T%d TSDDtor\n", context->tid); + if (context->thread) + context->thread->Destroy(); } void AsanThread::Destroy() { @@ -60,8 +95,8 @@ void AsanThread::Destroy() { Report("T%d exited\n", tid()); } - asanThreadRegistry().UnregisterThread(this); - CHECK(summary()->thread() == 0); + asanThreadRegistry().FinishThread(tid()); + FlushToAccumulatedStats(&stats_); // We also clear the shadow on thread destruction because // some code may still be executing in later TSD destructors // and we don't want it to have any poisoned stack. @@ -86,15 +121,16 @@ void AsanThread::Init() { AsanPlatformThreadInit(); } -thread_return_t AsanThread::ThreadStart() { +thread_return_t AsanThread::ThreadStart(uptr os_id) { Init(); + asanThreadRegistry().StartThread(tid(), os_id, 0); if (flags()->use_sigaltstack) SetAlternateSignalStack(); if (!start_routine_) { // start_routine_ == 0 if we're on the main thread or on one of the // OS X libdispatch worker threads. But nobody is supposed to call // ThreadStart() for the worker threads. - CHECK(tid() == 0); + CHECK_EQ(tid(), 0); return 0; } @@ -117,7 +153,8 @@ void AsanThread::ClearShadowForThreadStack() { PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); } -const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset) { +const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset, + uptr *frame_pc) { uptr bottom = 0; if (AddrIsInStack(addr)) { bottom = stack_bottom(); @@ -125,6 +162,7 @@ const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset) { bottom = fake_stack().AddrIsInFakeStack(addr); CHECK(bottom); *offset = addr - bottom; + *frame_pc = ((uptr*)bottom)[2]; return (const char *)((uptr*)bottom)[1]; } uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr. 
@@ -149,7 +187,79 @@ const char *AsanThread::GetFrameNameByAddr(uptr addr, uptr *offset) { uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1)); CHECK(ptr[0] == kCurrentStackFrameMagic); *offset = addr - (uptr)ptr; + *frame_pc = ptr[2]; return (const char*)ptr[1]; } +static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base, + void *addr) { + AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base); + AsanThread *t = tctx->thread; + return (t && t->fake_stack().StackSize() && + (t->fake_stack().AddrIsInFakeStack((uptr)addr) || + t->AddrIsInStack((uptr)addr))); +} + +AsanThread *GetCurrentThread() { + AsanThreadContext *context = (AsanThreadContext*)AsanTSDGet(); + if (!context) { + if (SANITIZER_ANDROID) { + // On Android, libc constructor is called _after_ asan_init, and cleans up + // TSD. Try to figure out if this is still the main thread by the stack + // address. We are not entirely sure that we have correct main thread + // limits, so only do this magic on Android, and only if the found thread + // is the main thread. + AsanThreadContext *tctx = GetThreadContextByTidLocked(0); + if (ThreadStackContainsAddress(tctx, &context)) { + SetCurrentThread(tctx->thread); + return tctx->thread; + } + } + return 0; + } + return context->thread; +} + +void SetCurrentThread(AsanThread *t) { + CHECK(t->context()); + if (flags()->verbosity >= 2) { + Report("SetCurrentThread: %p for thread %p\n", + t->context(), (void*)GetThreadSelf()); + } + // Make sure we do not reset the current AsanThread. + CHECK_EQ(0, AsanTSDGet()); + AsanTSDSet(t->context()); + CHECK_EQ(t->context(), AsanTSDGet()); +} + +u32 GetCurrentTidOrInvalid() { + AsanThread *t = GetCurrentThread(); + return t ? t->tid() : kInvalidTid; +} + +AsanThread *FindThreadByStackAddress(uptr addr) { + asanThreadRegistry().CheckLocked(); + AsanThreadContext *tctx = static_cast<AsanThreadContext *>( + asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, + (void *)addr)); + return tctx ? tctx->thread : 0; +} } // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, + uptr *cache_begin, uptr *cache_end) { + // FIXME: Stub. + return false; +} + +void LockThreadRegistry() { + __asan::asanThreadRegistry().Lock(); +} + +void UnlockThreadRegistry() { + __asan::asanThreadRegistry().Unlock(); +} +} // namespace __lsan diff --git a/lib/asan/asan_thread.h b/lib/asan/asan_thread.h index acc27e52e224..14062b62f751 100644 --- a/lib/asan/asan_thread.h +++ b/lib/asan/asan_thread.h @@ -16,76 +16,58 @@ #include "asan_allocator.h" #include "asan_internal.h" +#include "asan_fake_stack.h" #include "asan_stack.h" #include "asan_stats.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_thread_registry.h" namespace __asan { const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits. +const u32 kMaxNumberOfThreads = (1 << 22); // 4M class AsanThread; // These objects are created for every thread and are never deleted, // so we can find them by tid even if the thread is long dead. -class AsanThreadSummary { +class AsanThreadContext : public ThreadContextBase { public: - explicit AsanThreadSummary(LinkerInitialized) { } // for T0. 
- void Init(u32 parent_tid, StackTrace *stack) { - parent_tid_ = parent_tid; - announced_ = false; - tid_ = kInvalidTid; - if (stack) { - internal_memcpy(&stack_, stack, sizeof(*stack)); - } - thread_ = 0; - name_[0] = 0; + explicit AsanThreadContext(int tid) + : ThreadContextBase(tid), + announced(false), + thread(0) { + internal_memset(&stack, 0, sizeof(stack)); } - u32 tid() { return tid_; } - void set_tid(u32 tid) { tid_ = tid; } - u32 parent_tid() { return parent_tid_; } - bool announced() { return announced_; } - void set_announced(bool announced) { announced_ = announced; } - StackTrace *stack() { return &stack_; } - AsanThread *thread() { return thread_; } - void set_thread(AsanThread *thread) { thread_ = thread; } - static void TSDDtor(void *tsd); - void set_name(const char *name) { - internal_strncpy(name_, name, sizeof(name_) - 1); - } - const char *name() { return name_; } + bool announced; + StackTrace stack; + AsanThread *thread; - private: - u32 tid_; - u32 parent_tid_; - bool announced_; - StackTrace stack_; - AsanThread *thread_; - char name_[128]; + void OnCreated(void *arg); + void OnFinished(); }; -// AsanThreadSummary objects are never freed, so we need many of them. -COMPILER_CHECK(sizeof(AsanThreadSummary) <= 4094); +// AsanThreadContext objects are never freed, so we need many of them. +COMPILER_CHECK(sizeof(AsanThreadContext) <= 4096); // AsanThread are stored in TSD and destroyed when the thread dies. class AsanThread { public: - explicit AsanThread(LinkerInitialized); // for T0. - static AsanThread *Create(u32 parent_tid, thread_callback_t start_routine, - void *arg, StackTrace *stack); + static AsanThread *Create(thread_callback_t start_routine, void *arg); + static void TSDDtor(void *tsd); void Destroy(); void Init(); // Should be called from the thread itself. - thread_return_t ThreadStart(); + thread_return_t ThreadStart(uptr os_id); uptr stack_top() { return stack_top_; } uptr stack_bottom() { return stack_bottom_; } uptr stack_size() { return stack_top_ - stack_bottom_; } - u32 tid() { return summary_->tid(); } - AsanThreadSummary *summary() { return summary_; } - void set_summary(AsanThreadSummary *summary) { summary_ = summary; } + u32 tid() { return context_->tid; } + AsanThreadContext *context() { return context_; } + void set_context(AsanThreadContext *context) { context_ = context; } - const char *GetFrameNameByAddr(uptr addr, uptr *offset); + const char *GetFrameNameByAddr(uptr addr, uptr *offset, uptr *frame_pc); bool AddrIsInStack(uptr addr) { return addr >= stack_bottom_ && addr < stack_top_; @@ -96,9 +78,10 @@ class AsanThread { AsanStats &stats() { return stats_; } private: + AsanThread() {} void SetThreadStackTopAndBottom(); void ClearShadowForThreadStack(); - AsanThreadSummary *summary_; + AsanThreadContext *context_; thread_callback_t start_routine_; void *arg_; uptr stack_top_; @@ -109,6 +92,23 @@ class AsanThread { AsanStats stats_; }; +struct CreateThreadContextArgs { + AsanThread *thread; + StackTrace *stack; +}; + +// Returns a single instance of registry. +ThreadRegistry &asanThreadRegistry(); + +// Must be called under ThreadRegistryLock. +AsanThreadContext *GetThreadContextByTidLocked(u32 tid); + +// Get the current thread. May return 0. 
+AsanThread *GetCurrentThread(); +void SetCurrentThread(AsanThread *t); +u32 GetCurrentTidOrInvalid(); +AsanThread *FindThreadByStackAddress(uptr addr); + } // namespace __asan #endif // ASAN_THREAD_H diff --git a/lib/asan/asan_thread_registry.cc b/lib/asan/asan_thread_registry.cc deleted file mode 100644 index 80675405fbd5..000000000000 --- a/lib/asan/asan_thread_registry.cc +++ /dev/null @@ -1,198 +0,0 @@ -//===-- asan_thread_registry.cc -------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// AsanThreadRegistry-related code. AsanThreadRegistry is a container -// for summaries of all created threads. -//===----------------------------------------------------------------------===// - -#include "asan_stack.h" -#include "asan_thread.h" -#include "asan_thread_registry.h" -#include "sanitizer_common/sanitizer_common.h" - -namespace __asan { - -static AsanThreadRegistry asan_thread_registry(LINKER_INITIALIZED); - -AsanThreadRegistry &asanThreadRegistry() { - return asan_thread_registry; -} - -AsanThreadRegistry::AsanThreadRegistry(LinkerInitialized x) - : main_thread_(x), - main_thread_summary_(x), - accumulated_stats_(x), - max_malloced_memory_(x), - mu_(x) { } - -void AsanThreadRegistry::Init() { - AsanTSDInit(AsanThreadSummary::TSDDtor); - main_thread_.set_summary(&main_thread_summary_); - main_thread_summary_.set_thread(&main_thread_); - RegisterThread(&main_thread_); - SetCurrent(&main_thread_); - // At this point only one thread exists. - inited_ = true; -} - -void AsanThreadRegistry::RegisterThread(AsanThread *thread) { - BlockingMutexLock lock(&mu_); - u32 tid = n_threads_; - n_threads_++; - CHECK(n_threads_ < kMaxNumberOfThreads); - - AsanThreadSummary *summary = thread->summary(); - CHECK(summary != 0); - summary->set_tid(tid); - thread_summaries_[tid] = summary; -} - -void AsanThreadRegistry::UnregisterThread(AsanThread *thread) { - BlockingMutexLock lock(&mu_); - FlushToAccumulatedStatsUnlocked(&thread->stats()); - AsanThreadSummary *summary = thread->summary(); - CHECK(summary); - summary->set_thread(0); -} - -AsanThread *AsanThreadRegistry::GetMain() { - return &main_thread_; -} - -AsanThread *AsanThreadRegistry::GetCurrent() { - AsanThreadSummary *summary = (AsanThreadSummary *)AsanTSDGet(); - if (!summary) { -#if ASAN_ANDROID - // On Android, libc constructor is called _after_ asan_init, and cleans up - // TSD. Try to figure out if this is still the main thread by the stack - // address. We are not entirely sure that we have correct main thread - // limits, so only do this magic on Android, and only if the found thread is - // the main thread. - AsanThread* thread = FindThreadByStackAddress((uptr)&summary); - if (thread && thread->tid() == 0) { - SetCurrent(thread); - return thread; - } -#endif - return 0; - } - return summary->thread(); -} - -void AsanThreadRegistry::SetCurrent(AsanThread *t) { - CHECK(t->summary()); - if (flags()->verbosity >= 2) { - Report("SetCurrent: %p for thread %p\n", - t->summary(), (void*)GetThreadSelf()); - } - // Make sure we do not reset the current AsanThread. 
- CHECK(AsanTSDGet() == 0); - AsanTSDSet(t->summary()); - CHECK(AsanTSDGet() == t->summary()); -} - -AsanStats &AsanThreadRegistry::GetCurrentThreadStats() { - AsanThread *t = GetCurrent(); - return (t) ? t->stats() : main_thread_.stats(); -} - -void AsanThreadRegistry::GetAccumulatedStats(AsanStats *stats) { - BlockingMutexLock lock(&mu_); - UpdateAccumulatedStatsUnlocked(); - internal_memcpy(stats, &accumulated_stats_, sizeof(accumulated_stats_)); -} - -uptr AsanThreadRegistry::GetCurrentAllocatedBytes() { - BlockingMutexLock lock(&mu_); - UpdateAccumulatedStatsUnlocked(); - uptr malloced = accumulated_stats_.malloced; - uptr freed = accumulated_stats_.freed; - // Return sane value if malloced < freed due to racy - // way we update accumulated stats. - return (malloced > freed) ? malloced - freed : 1; -} - -uptr AsanThreadRegistry::GetHeapSize() { - BlockingMutexLock lock(&mu_); - UpdateAccumulatedStatsUnlocked(); - return accumulated_stats_.mmaped - accumulated_stats_.munmaped; -} - -uptr AsanThreadRegistry::GetFreeBytes() { - BlockingMutexLock lock(&mu_); - UpdateAccumulatedStatsUnlocked(); - uptr total_free = accumulated_stats_.mmaped - - accumulated_stats_.munmaped - + accumulated_stats_.really_freed - + accumulated_stats_.really_freed_redzones; - uptr total_used = accumulated_stats_.malloced - + accumulated_stats_.malloced_redzones; - // Return sane value if total_free < total_used due to racy - // way we update accumulated stats. - return (total_free > total_used) ? total_free - total_used : 1; -} - -// Return several stats counters with a single call to -// UpdateAccumulatedStatsUnlocked(). -void AsanThreadRegistry::FillMallocStatistics(AsanMallocStats *malloc_stats) { - BlockingMutexLock lock(&mu_); - UpdateAccumulatedStatsUnlocked(); - malloc_stats->blocks_in_use = accumulated_stats_.mallocs; - malloc_stats->size_in_use = accumulated_stats_.malloced; - malloc_stats->max_size_in_use = max_malloced_memory_; - malloc_stats->size_allocated = accumulated_stats_.mmaped; -} - -AsanThreadSummary *AsanThreadRegistry::FindByTid(u32 tid) { - CHECK(tid < n_threads_); - CHECK(thread_summaries_[tid]); - return thread_summaries_[tid]; -} - -AsanThread *AsanThreadRegistry::FindThreadByStackAddress(uptr addr) { - BlockingMutexLock lock(&mu_); - for (u32 tid = 0; tid < n_threads_; tid++) { - AsanThread *t = thread_summaries_[tid]->thread(); - if (!t || !(t->fake_stack().StackSize())) continue; - if (t->fake_stack().AddrIsInFakeStack(addr) || t->AddrIsInStack(addr)) { - return t; - } - } - return 0; -} - -void AsanThreadRegistry::UpdateAccumulatedStatsUnlocked() { - for (u32 tid = 0; tid < n_threads_; tid++) { - AsanThread *t = thread_summaries_[tid]->thread(); - if (t != 0) { - FlushToAccumulatedStatsUnlocked(&t->stats()); - } - } - // This is not very accurate: we may miss allocation peaks that happen - // between two updates of accumulated_stats_. For more accurate bookkeeping - // the maximum should be updated on every malloc(), which is unacceptable. - if (max_malloced_memory_ < accumulated_stats_.malloced) { - max_malloced_memory_ = accumulated_stats_.malloced; - } -} - -void AsanThreadRegistry::FlushToAccumulatedStatsUnlocked(AsanStats *stats) { - // AsanStats consists of variables of type uptr only. 
- uptr *dst = (uptr*)&accumulated_stats_; - uptr *src = (uptr*)stats; - uptr num_fields = sizeof(AsanStats) / sizeof(uptr); - for (uptr i = 0; i < num_fields; i++) { - dst[i] += src[i]; - src[i] = 0; - } -} - -} // namespace __asan diff --git a/lib/asan/asan_thread_registry.h b/lib/asan/asan_thread_registry.h deleted file mode 100644 index adb1a6d4f32d..000000000000 --- a/lib/asan/asan_thread_registry.h +++ /dev/null @@ -1,85 +0,0 @@ -//===-- asan_thread_registry.h ----------------------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// ASan-private header for asan_thread_registry.cc -//===----------------------------------------------------------------------===// - -#ifndef ASAN_THREAD_REGISTRY_H -#define ASAN_THREAD_REGISTRY_H - -#include "asan_stack.h" -#include "asan_stats.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_mutex.h" - -namespace __asan { - -// Stores summaries of all created threads, returns current thread, -// thread by tid, thread by stack address. There is a single instance -// of AsanThreadRegistry for the whole program. -// AsanThreadRegistry is thread-safe. -class AsanThreadRegistry { - public: - explicit AsanThreadRegistry(LinkerInitialized); - void Init(); - void RegisterThread(AsanThread *thread); - void UnregisterThread(AsanThread *thread); - - AsanThread *GetMain(); - // Get the current thread. May return 0. - AsanThread *GetCurrent(); - void SetCurrent(AsanThread *t); - - u32 GetCurrentTidOrInvalid() { - if (!inited_) return 0; - AsanThread *t = GetCurrent(); - return t ? t->tid() : kInvalidTid; - } - - // Returns stats for GetCurrent(), or stats for - // T0 if GetCurrent() returns 0. - AsanStats &GetCurrentThreadStats(); - // Flushes all thread-local stats to accumulated stats, and makes - // a copy of accumulated stats. - void GetAccumulatedStats(AsanStats *stats); - uptr GetCurrentAllocatedBytes(); - uptr GetHeapSize(); - uptr GetFreeBytes(); - void FillMallocStatistics(AsanMallocStats *malloc_stats); - - AsanThreadSummary *FindByTid(u32 tid); - AsanThread *FindThreadByStackAddress(uptr addr); - - private: - void UpdateAccumulatedStatsUnlocked(); - // Adds values of all counters in "stats" to accumulated stats, - // and fills "stats" with zeroes. - void FlushToAccumulatedStatsUnlocked(AsanStats *stats); - - static const u32 kMaxNumberOfThreads = (1 << 22); // 4M - AsanThreadSummary *thread_summaries_[kMaxNumberOfThreads]; - AsanThread main_thread_; - AsanThreadSummary main_thread_summary_; - AsanStats accumulated_stats_; - // Required for malloc_zone_statistics() on OS X. This can't be stored in - // per-thread AsanStats. - uptr max_malloced_memory_; - u32 n_threads_; - BlockingMutex mu_; - bool inited_; -}; - -// Returns a single instance of registry. -AsanThreadRegistry &asanThreadRegistry(); - -} // namespace __asan - -#endif // ASAN_THREAD_REGISTRY_H diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc index d8ce050641bc..f74de7227ed2 100644 --- a/lib/asan/asan_win.cc +++ b/lib/asan/asan_win.cc @@ -11,7 +11,9 @@ // // Windows-specific details. 
//===----------------------------------------------------------------------===// -#ifdef _WIN32 + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS #include <windows.h> #include <dbghelp.h> @@ -30,30 +32,6 @@ static BlockingMutex dbghelp_lock(LINKER_INITIALIZED); static bool dbghelp_initialized = false; #pragma comment(lib, "dbghelp.lib") -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) { - (void)fast; - stack->max_size = max_s; - void *tmp[kStackTraceMax]; - - // FIXME: CaptureStackBackTrace might be too slow for us. - // FIXME: Compare with StackWalk64. - // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc - uptr cs_ret = CaptureStackBackTrace(1, stack->max_size, tmp, 0); - uptr offset = 0; - // Skip the RTL frames by searching for the PC in the stacktrace. - // FIXME: this doesn't work well for the malloc/free stacks yet. - for (uptr i = 0; i < cs_ret; i++) { - if (pc != (uptr)tmp[i]) - continue; - offset = i; - break; - } - - stack->size = cs_ret - offset; - for (uptr i = 0; i < stack->size; i++) - stack->trace[i] = (uptr)tmp[i + offset]; -} - // ---------------------- TSD ---------------- {{{1 static bool tsd_key_inited = false; diff --git a/lib/asan/dynamic/asan_interceptors_dynamic.cc b/lib/asan/dynamic/asan_interceptors_dynamic.cc deleted file mode 100644 index 4f0f7bd2d5f8..000000000000 --- a/lib/asan/dynamic/asan_interceptors_dynamic.cc +++ /dev/null @@ -1,111 +0,0 @@ -//===-- asan_interceptors_dynamic.cc --------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// __DATA,__interpose section of the dynamic runtime library for Mac OS. 
-//===----------------------------------------------------------------------===// - -#if defined(__APPLE__) - -#include "../asan_interceptors.h" -#include "../asan_intercepted_functions.h" - -namespace __asan { - -#if !MAC_INTERPOSE_FUNCTIONS -# error \ - Dynamic interposing library should be built with -DMAC_INTERPOSE_FUNCTIONS -#endif - -#define INTERPOSE_FUNCTION(function) \ - { reinterpret_cast<const uptr>(WRAP(function)), \ - reinterpret_cast<const uptr>(function) } - -#define INTERPOSE_FUNCTION_2(function, wrapper) \ - { reinterpret_cast<const uptr>(wrapper), \ - reinterpret_cast<const uptr>(function) } - -struct interpose_substitution { - const uptr replacement; - const uptr original; -}; - -__attribute__((used)) -const interpose_substitution substitutions[] - __attribute__((section("__DATA, __interpose"))) = { - INTERPOSE_FUNCTION(strlen), - INTERPOSE_FUNCTION(memcmp), - INTERPOSE_FUNCTION(memcpy), - INTERPOSE_FUNCTION(memmove), - INTERPOSE_FUNCTION(memset), - INTERPOSE_FUNCTION(strchr), - INTERPOSE_FUNCTION(strcat), - INTERPOSE_FUNCTION(strncat), - INTERPOSE_FUNCTION(strcpy), - INTERPOSE_FUNCTION(strncpy), - INTERPOSE_FUNCTION(pthread_create), - INTERPOSE_FUNCTION(longjmp), -#if ASAN_INTERCEPT__LONGJMP - INTERPOSE_FUNCTION(_longjmp), -#endif -#if ASAN_INTERCEPT_SIGLONGJMP - INTERPOSE_FUNCTION(siglongjmp), -#endif -#if ASAN_INTERCEPT_STRDUP - INTERPOSE_FUNCTION(strdup), -#endif -#if ASAN_INTERCEPT_STRNLEN - INTERPOSE_FUNCTION(strnlen), -#endif -#if ASAN_INTERCEPT_INDEX - INTERPOSE_FUNCTION_2(index, WRAP(strchr)), -#endif - INTERPOSE_FUNCTION(strcmp), - INTERPOSE_FUNCTION(strncmp), -#if ASAN_INTERCEPT_STRCASECMP_AND_STRNCASECMP - INTERPOSE_FUNCTION(strcasecmp), - INTERPOSE_FUNCTION(strncasecmp), -#endif - INTERPOSE_FUNCTION(atoi), - INTERPOSE_FUNCTION(atol), - INTERPOSE_FUNCTION(strtol), -#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL - INTERPOSE_FUNCTION(atoll), - INTERPOSE_FUNCTION(strtoll), -#endif -#if ASAN_INTERCEPT_MLOCKX - INTERPOSE_FUNCTION(mlock), - INTERPOSE_FUNCTION(munlock), - INTERPOSE_FUNCTION(mlockall), - INTERPOSE_FUNCTION(munlockall), -#endif - INTERPOSE_FUNCTION(dispatch_async_f), - INTERPOSE_FUNCTION(dispatch_sync_f), - INTERPOSE_FUNCTION(dispatch_after_f), - INTERPOSE_FUNCTION(dispatch_barrier_async_f), - INTERPOSE_FUNCTION(dispatch_group_async_f), -#ifndef MISSING_BLOCKS_SUPPORT - INTERPOSE_FUNCTION(dispatch_group_async), - INTERPOSE_FUNCTION(dispatch_async), - INTERPOSE_FUNCTION(dispatch_after), - INTERPOSE_FUNCTION(dispatch_source_set_event_handler), - INTERPOSE_FUNCTION(dispatch_source_set_cancel_handler), -#endif - INTERPOSE_FUNCTION(signal), - INTERPOSE_FUNCTION(sigaction), - - INTERPOSE_FUNCTION(__CFInitialize), - INTERPOSE_FUNCTION(CFStringCreateCopy), - INTERPOSE_FUNCTION(free), -}; - -} // namespace __asan - -#endif // __APPLE__ diff --git a/lib/asan/lit_tests/CMakeLists.txt b/lib/asan/lit_tests/CMakeLists.txt index 1609032d4670..d2420b50da83 100644 --- a/lib/asan/lit_tests/CMakeLists.txt +++ b/lib/asan/lit_tests/CMakeLists.txt @@ -14,9 +14,9 @@ configure_lit_site_cfg( if(COMPILER_RT_CAN_EXECUTE_TESTS) # Run ASan tests only if we're sure we may produce working binaries. 
set(ASAN_TEST_DEPS - clang clang-headers FileCheck count not llvm-nm llvm-symbolizer + ${SANITIZER_COMMON_LIT_TEST_DEPS} ${ASAN_RUNTIME_LIBRARIES} - ) + asan_blacklist) set(ASAN_TEST_PARAMS asan_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg ) diff --git a/lib/asan/lit_tests/Darwin/interface_symbols_darwin.c b/lib/asan/lit_tests/Darwin/interface_symbols_darwin.c new file mode 100644 index 000000000000..3fca6e915324 --- /dev/null +++ b/lib/asan/lit_tests/Darwin/interface_symbols_darwin.c @@ -0,0 +1,39 @@ +// Check the presense of interface symbols in the ASan runtime dylib. +// If you're changing this file, please also change +// ../Linux/interface_symbols.c + +// RUN: %clang -fsanitize=address -dead_strip -O2 %s -o %t.exe +// RUN: rm -f %t.symbols %t.interface + +// RUN: nm -g `otool -L %t.exe | grep "asan_osx_dynamic.dylib" | \ +// RUN: tr -d '\011' | \ +// RUN: sed "s/.dylib.*/.dylib/"` \ +// RUN: | grep " T " | sed "s/.* T //" \ +// RUN: | grep "__asan_" | sed "s/___asan_/__asan_/" \ +// RUN: | grep -v "__asan_malloc_hook" \ +// RUN: | grep -v "__asan_free_hook" \ +// RUN: | grep -v "__asan_symbolize" \ +// RUN: | grep -v "__asan_default_options" \ +// RUN: | grep -v "__asan_on_error" > %t.symbols + +// RUN: cat %p/../../asan_interface_internal.h \ +// RUN: | sed "s/\/\/.*//" | sed "s/typedef.*//" \ +// RUN: | grep -v "OPTIONAL" \ +// RUN: | grep "__asan_.*(" | sed "s/.* __asan_/__asan_/;s/(.*//" \ +// RUN: > %t.interface +// RUN: echo __asan_report_load1 >> %t.interface +// RUN: echo __asan_report_load2 >> %t.interface +// RUN: echo __asan_report_load4 >> %t.interface +// RUN: echo __asan_report_load8 >> %t.interface +// RUN: echo __asan_report_load16 >> %t.interface +// RUN: echo __asan_report_store1 >> %t.interface +// RUN: echo __asan_report_store2 >> %t.interface +// RUN: echo __asan_report_store4 >> %t.interface +// RUN: echo __asan_report_store8 >> %t.interface +// RUN: echo __asan_report_store16 >> %t.interface +// RUN: echo __asan_report_load_n >> %t.interface +// RUN: echo __asan_report_store_n >> %t.interface + +// RUN: cat %t.interface | sort -u | diff %t.symbols - + +int main() { return 0; } diff --git a/lib/asan/lit_tests/Darwin/lit.local.cfg b/lib/asan/lit_tests/Darwin/lit.local.cfg new file mode 100644 index 000000000000..a85dfcd24c08 --- /dev/null +++ b/lib/asan/lit_tests/Darwin/lit.local.cfg @@ -0,0 +1,9 @@ +def getRoot(config): + if not config.parent: + return config + return getRoot(config.parent) + +root = getRoot(config) + +if root.host_os not in ['Darwin']: + config.unsupported = True diff --git a/lib/asan/lit_tests/Darwin/reexec-insert-libraries-env.cc b/lib/asan/lit_tests/Darwin/reexec-insert-libraries-env.cc new file mode 100644 index 000000000000..40a459fd84db --- /dev/null +++ b/lib/asan/lit_tests/Darwin/reexec-insert-libraries-env.cc @@ -0,0 +1,20 @@ +// Make sure ASan doesn't hang in an exec loop if DYLD_INSERT_LIBRARIES is set. +// This is a regression test for +// https://code.google.com/p/address-sanitizer/issues/detail?id=159 + +// RUN: %clangxx_asan -m64 %s -o %t +// RUN: %clangxx -m64 %p/../SharedLibs/darwin-dummy-shared-lib-so.cc \ +// RUN: -dynamiclib -o darwin-dummy-shared-lib-so.dylib + +// FIXME: the following command line may hang in the case of a regression. 
+// RUN: DYLD_INSERT_LIBRARIES=darwin-dummy-shared-lib-so.dylib \ +// RUN: %t 2>&1 | FileCheck %s || exit 1 +#include <stdio.h> +#include <stdlib.h> + +int main() { + const char kEnvName[] = "DYLD_INSERT_LIBRARIES"; + printf("%s=%s\n", kEnvName, getenv(kEnvName)); + // CHECK: {{DYLD_INSERT_LIBRARIES=.*darwin-dummy-shared-lib-so.dylib.*}} + return 0; +} diff --git a/lib/asan/lit_tests/Darwin/unset-insert-libraries-on-exec.cc b/lib/asan/lit_tests/Darwin/unset-insert-libraries-on-exec.cc new file mode 100644 index 000000000000..cf89949cf942 --- /dev/null +++ b/lib/asan/lit_tests/Darwin/unset-insert-libraries-on-exec.cc @@ -0,0 +1,20 @@ +// Make sure ASan removes the runtime library from DYLD_INSERT_LIBRARIES before +// executing other programs. + +// RUN: %clangxx_asan -m64 %s -o %t +// RUN: %clangxx -m64 %p/../SharedLibs/darwin-dummy-shared-lib-so.cc \ +// RUN: -dynamiclib -o darwin-dummy-shared-lib-so.dylib + +// Make sure DYLD_INSERT_LIBRARIES doesn't contain the runtime library before +// execl(). + +// RUN: %t >/dev/null 2>&1 +// RUN: DYLD_INSERT_LIBRARIES=darwin-dummy-shared-lib-so.dylib \ +// RUN: %t 2>&1 | FileCheck %s || exit 1 +#include <unistd.h> +int main() { + execl("/bin/bash", "/bin/bash", "-c", + "echo DYLD_INSERT_LIBRARIES=$DYLD_INSERT_LIBRARIES", NULL); + // CHECK: {{DYLD_INSERT_LIBRARIES=.*darwin-dummy-shared-lib-so.dylib.*}} + return 0; +} diff --git a/lib/asan/lit_tests/Helpers/init-order-atexit-extra.cc b/lib/asan/lit_tests/Helpers/init-order-atexit-extra.cc new file mode 100644 index 000000000000..e4189d19d099 --- /dev/null +++ b/lib/asan/lit_tests/Helpers/init-order-atexit-extra.cc @@ -0,0 +1,16 @@ +#include <stdio.h> + +class C { + public: + C() { value = 42; } + ~C() { } + int value; +}; + +C c; + +void AccessC() { + printf("C value: %d\n", c.value); +} + +int main() { return 0; } diff --git a/lib/asan/lit_tests/Helpers/initialization-blacklist-extra2.cc b/lib/asan/lit_tests/Helpers/initialization-blacklist-extra2.cc new file mode 100644 index 000000000000..69455a0a6fc9 --- /dev/null +++ b/lib/asan/lit_tests/Helpers/initialization-blacklist-extra2.cc @@ -0,0 +1,4 @@ +int zero_init(); +int badSrcGlobal = zero_init(); +int readBadSrcGlobal() { return badSrcGlobal; } + diff --git a/lib/asan/lit_tests/Helpers/initialization-blacklist.txt b/lib/asan/lit_tests/Helpers/initialization-blacklist.txt index c5f6610937f0..fa4a83667f4b 100644 --- a/lib/asan/lit_tests/Helpers/initialization-blacklist.txt +++ b/lib/asan/lit_tests/Helpers/initialization-blacklist.txt @@ -1,2 +1,3 @@ global-init:*badGlobal* global-init-type:*badNamespace::BadClass* +global-init-src:*initialization-blacklist-extra2.cc diff --git a/lib/asan/lit_tests/Helpers/initialization-constexpr-extra.cc b/lib/asan/lit_tests/Helpers/initialization-constexpr-extra.cc new file mode 100644 index 000000000000..b32466a981b3 --- /dev/null +++ b/lib/asan/lit_tests/Helpers/initialization-constexpr-extra.cc @@ -0,0 +1,3 @@ +// Constexpr: +int getCoolestInteger(); +static int coolest_integer = getCoolestInteger(); diff --git a/lib/asan/lit_tests/Helpers/initialization-nobug-extra.cc b/lib/asan/lit_tests/Helpers/initialization-nobug-extra.cc index 490b3339054a..886165affd76 100644 --- a/lib/asan/lit_tests/Helpers/initialization-nobug-extra.cc +++ b/lib/asan/lit_tests/Helpers/initialization-nobug-extra.cc @@ -4,6 +4,6 @@ static int ab = getAB(); // Function local statics: int countCalls(); static int one = countCalls(); -// Constexpr: -int getCoolestInteger(); -static int coolest_integer = getCoolestInteger(); +// Trivial 
constructor, non-trivial destructor: +int getStructWithDtorValue(); +static int val = getStructWithDtorValue(); diff --git a/lib/asan/lit_tests/Linux/asan_prelink_test.cc b/lib/asan/lit_tests/Linux/asan_prelink_test.cc new file mode 100644 index 000000000000..c209c39c8c42 --- /dev/null +++ b/lib/asan/lit_tests/Linux/asan_prelink_test.cc @@ -0,0 +1,28 @@ +// Test if asan works with prelink. +// It does not actually use prelink, but relies on ld's flag -Ttext-segment +// or gold's flag -Ttext (we try the first flag first, if that fails we +// try the second flag). +// +// RUN: %clangxx_asan -m64 -c %s -o %t.o +// RUN: %clangxx_asan -m64 -DBUILD_SO=1 -fPIC -shared %s -o %t.so -Wl,-Ttext-segment=0x3600000000 ||\ +// RUN: %clangxx_asan -m64 -DBUILD_SO=1 -fPIC -shared %s -o %t.so -Wl,-Ttext=0x3600000000 +// RUN: %clangxx_asan -m64 %t.o %t.so -Wl,-R. -o %t +// RUN: ASAN_OPTIONS=verbosity=1 %t 2>&1 | FileCheck %s + +// REQUIRES: x86_64-supported-target +#if BUILD_SO +int G; +int *getG() { + return &G; +} +#else +#include <stdio.h> +extern int *getG(); +int main(int argc, char **argv) { + long p = (long)getG(); + printf("SO mapped at %lx\n", p & ~0xffffffffUL); + *getG() = 0; +} +#endif +// CHECK: 0x003000000000, 0x004fffffffff{{.*}} MidMem +// CHECK: SO mapped at 3600000000 diff --git a/lib/asan/lit_tests/Linux/glob.cc b/lib/asan/lit_tests/Linux/glob.cc new file mode 100644 index 000000000000..e05228ff39e3 --- /dev/null +++ b/lib/asan/lit_tests/Linux/glob.cc @@ -0,0 +1,30 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t %p 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m64 -O3 %s -o %t && %t %p 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m32 -O0 %s -o %t && %t %p 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m32 -O3 %s -o %t && %t %p 2>&1 | FileCheck %s + +#include <assert.h> +#include <glob.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> +#include <string> + + +int main(int argc, char *argv[]) { + std::string path = argv[1]; + std::string pattern = path + "/glob_test_root/*a"; + printf("pattern: %s\n", pattern.c_str()); + + glob_t globbuf; + int res = glob(pattern.c_str(), 0, 0, &globbuf); + + printf("%d %s\n", errno, strerror(errno)); + assert(res == 0); + assert(globbuf.gl_pathc == 2); + printf("%zu\n", strlen(globbuf.gl_pathv[0])); + printf("%zu\n", strlen(globbuf.gl_pathv[1])); + printf("PASS\n"); + // CHECK: PASS + return 0; +} diff --git a/lib/asan/lit_tests/Linux/glob_test_root/aa b/lib/asan/lit_tests/Linux/glob_test_root/aa new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/lib/asan/lit_tests/Linux/glob_test_root/aa diff --git a/lib/asan/lit_tests/Linux/glob_test_root/ab b/lib/asan/lit_tests/Linux/glob_test_root/ab new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/lib/asan/lit_tests/Linux/glob_test_root/ab diff --git a/lib/asan/lit_tests/Linux/glob_test_root/ba b/lib/asan/lit_tests/Linux/glob_test_root/ba new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/lib/asan/lit_tests/Linux/glob_test_root/ba diff --git a/lib/asan/lit_tests/Linux/heavy_uar_test.cc b/lib/asan/lit_tests/Linux/heavy_uar_test.cc new file mode 100644 index 000000000000..c0f4560fb4e7 --- /dev/null +++ b/lib/asan/lit_tests/Linux/heavy_uar_test.cc @@ -0,0 +1,55 @@ +// RUN: %clangxx_asan -fsanitize=use-after-return -m64 -O0 %s -o %t && \ +// RUN: %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -fsanitize=use-after-return -m64 -O2 %s -o %t && \ +// RUN: %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan 
-fsanitize=use-after-return -m32 -O2 %s -o %t && \ +// RUN: %t 2>&1 | %symbolize | FileCheck %s + +#include <stdio.h> +#include <string.h> +#include <stdlib.h> + +__attribute__((noinline)) +char *pretend_to_do_something(char *x) { + __asm__ __volatile__("" : : "r" (x) : "memory"); + return x; +} + +__attribute__((noinline)) +char *LeakStack() { + char x[1024]; + memset(x, 0, sizeof(x)); + return pretend_to_do_something(x); +} + +template<size_t kFrameSize> +__attribute__((noinline)) +void RecuriveFunctionWithStackFrame(int depth) { + if (depth <= 0) return; + char x[kFrameSize]; + x[0] = depth; + pretend_to_do_something(x); + RecuriveFunctionWithStackFrame<kFrameSize>(depth - 1); +} + +int main(int argc, char **argv) { + int n_iter = argc >= 2 ? atoi(argv[1]) : 1000; + int depth = argc >= 3 ? atoi(argv[2]) : 500; + for (int i = 0; i < n_iter; i++) { + RecuriveFunctionWithStackFrame<10>(depth); + RecuriveFunctionWithStackFrame<100>(depth); + RecuriveFunctionWithStackFrame<500>(depth); + RecuriveFunctionWithStackFrame<1024>(depth); + RecuriveFunctionWithStackFrame<2000>(depth); + RecuriveFunctionWithStackFrame<5000>(depth); + RecuriveFunctionWithStackFrame<10000>(depth); + } + char *stale_stack = LeakStack(); + RecuriveFunctionWithStackFrame<1024>(10); + stale_stack[100]++; + // CHECK: ERROR: AddressSanitizer: stack-use-after-return on address + // CHECK: is located in stack of thread T0 at offset 132 in frame + // CHECK: in LeakStack(){{.*}}heavy_uar_test.cc: + // CHECK: [32, 1056) 'x' + return 0; +} diff --git a/lib/asan/lit_tests/Linux/initialization-bug-any-order.cc b/lib/asan/lit_tests/Linux/initialization-bug-any-order.cc index 645fe1c85ed4..4f41dda18128 100644 --- a/lib/asan/lit_tests/Linux/initialization-bug-any-order.cc +++ b/lib/asan/lit_tests/Linux/initialization-bug-any-order.cc @@ -1,12 +1,13 @@ // Test to make sure basic initialization order errors are caught. // Check that on Linux initialization order bugs are caught -// independently on order in which we list source files. +// independently on order in which we list source files (if we specify +// strict init-order checking). -// RUN: %clangxx_asan -m64 -O0 %s %p/../Helpers/initialization-bug-extra.cc\ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 \ +// RUN: %clangxx_asan -m64 -O0 %s %p/../Helpers/initialization-bug-extra.cc -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true:strict_init_order=true %t 2>&1 \ // RUN: | %symbolize | FileCheck %s -// RUN: %clangxx_asan -m64 -O0 %p/../Helpers/initialization-bug-extra.cc %s\ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 \ +// RUN: %clangxx_asan -m64 -O0 %p/../Helpers/initialization-bug-extra.cc %s -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true:strict_init_order=true %t 2>&1 \ // RUN: | %symbolize | FileCheck %s // Do not test with optimization -- the error may be optimized away. diff --git a/lib/asan/lit_tests/interface_symbols.c b/lib/asan/lit_tests/Linux/interface_symbols_linux.c index f3167f562922..4134c8744043 100644 --- a/lib/asan/lit_tests/interface_symbols.c +++ b/lib/asan/lit_tests/Linux/interface_symbols_linux.c @@ -1,14 +1,14 @@ // Check the presense of interface symbols in compiled file. 
-// RUN: %clang -fsanitize=address -dead_strip -O2 %s -o %t.exe
-// RUN: nm %t.exe | grep " T " | sed "s/.* T //" \
+// RUN: %clang -fsanitize=address -O2 %s -o %t.exe
+// RUN: nm -D %t.exe | grep " T " | sed "s/.* T //" \
// RUN: | grep "__asan_" | sed "s/___asan_/__asan_/" \
// RUN: | grep -v "__asan_malloc_hook" \
// RUN: | grep -v "__asan_free_hook" \
// RUN: | grep -v "__asan_symbolize" \
// RUN: | grep -v "__asan_default_options" \
// RUN: | grep -v "__asan_on_error" > %t.symbols
-// RUN: cat %p/../../../include/sanitizer/asan_interface.h \
+// RUN: cat %p/../../asan_interface_internal.h \
// RUN: | sed "s/\/\/.*//" | sed "s/typedef.*//" \
// RUN: | grep -v "OPTIONAL" \
// RUN: | grep "__asan_.*(" | sed "s/.* __asan_/__asan_/;s/(.*//" \
@@ -23,6 +23,12 @@
// RUN: echo __asan_report_store4 >> %t.interface
// RUN: echo __asan_report_store8 >> %t.interface
// RUN: echo __asan_report_store16 >> %t.interface
+// RUN: echo __asan_report_load_n >> %t.interface
+// RUN: echo __asan_report_store_n >> %t.interface
// RUN: cat %t.interface | sort -u | diff %t.symbols -
+// FIXME: nm -D on powerpc shows ASan interface symbols residing
+// in "initialized data section" for some reason.
+// REQUIRES: x86_64-supported-target,i386-supported-target
+
int main() { return 0; }
diff --git a/lib/asan/lit_tests/Linux/malloc-in-qsort.cc b/lib/asan/lit_tests/Linux/malloc-in-qsort.cc
index a3fa255b186d..ee2e81f0d2ab 100644
--- a/lib/asan/lit_tests/Linux/malloc-in-qsort.cc
+++ b/lib/asan/lit_tests/Linux/malloc-in-qsort.cc
@@ -1,10 +1,14 @@
-// RUN: %clangxx_asan -O2 %s -o %t
+// RUN: %clangxx_asan -m64 -O2 %s -o %t
// RUN: ASAN_OPTIONS=fast_unwind_on_malloc=1 %t 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK-FAST
// RUN: ASAN_OPTIONS=fast_unwind_on_malloc=0 %t 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK-SLOW
// Test how well we unwind in presence of qsort in the stack
// (i.e. if we can unwind through a function compiled w/o frame pointers).
// https://code.google.com/p/address-sanitizer/issues/detail?id=137
+
+// Fast unwinder is only available on x86_64 and i386.
+// REQUIRES: x86_64-supported-target
+
#include <stdlib.h>
#include <stdio.h>
diff --git a/lib/asan/lit_tests/malloc_delete_mismatch.cc b/lib/asan/lit_tests/Linux/malloc_delete_mismatch.cc
index f34b33a38fb3..f34b33a38fb3 100644
--- a/lib/asan/lit_tests/malloc_delete_mismatch.cc
+++ b/lib/asan/lit_tests/Linux/malloc_delete_mismatch.cc
diff --git a/lib/asan/lit_tests/Linux/overflow-in-qsort.cc b/lib/asan/lit_tests/Linux/overflow-in-qsort.cc
index c298991a8348..8bc43ca0a5c3 100644
--- a/lib/asan/lit_tests/Linux/overflow-in-qsort.cc
+++ b/lib/asan/lit_tests/Linux/overflow-in-qsort.cc
@@ -1,10 +1,14 @@
-// RUN: %clangxx_asan -O2 %s -o %t
+// RUN: %clangxx_asan -m64 -O2 %s -o %t
// RUN: ASAN_OPTIONS=fast_unwind_on_fatal=1 %t 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK-FAST
// RUN: ASAN_OPTIONS=fast_unwind_on_fatal=0 %t 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK-SLOW
// Test how well we unwind in presence of qsort in the stack
// (i.e. if we can unwind through a function compiled w/o frame pointers).
// https://code.google.com/p/address-sanitizer/issues/detail?id=137
+
+// Fast unwinder is only available on x86_64 and i386.
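// For context: the "fast unwinder" mentioned above walks saved frame pointers
// instead of consulting unwind tables. A rough standalone illustration (not
// part of this commit); it is only meaningful when frame pointers are kept,
// e.g. x86_64 built with -fno-omit-frame-pointer.

#include <stdio.h>

// On x86_64 each frame begins with the saved caller frame pointer followed by
// the return address, so the stack forms a linked list of frames.
struct Frame {
  Frame *next;     // saved frame pointer of the caller
  void *ret_addr;  // return address into the caller
};

__attribute__((noinline)) void PrintFastBacktrace() {
  Frame *frame = (Frame *)__builtin_frame_address(0);
  // A real unwinder also checks that the chain stays inside the stack bounds.
  for (int i = 0; frame && i < 8; ++i) {
    printf("  #%d %p\n", i, frame->ret_addr);
    frame = frame->next;
  }
}

__attribute__((noinline)) void Callee() { PrintFastBacktrace(); }

int main() {
  Callee();
  return 0;
}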
+// REQUIRES: x86_64-supported-target + #include <stdlib.h> #include <stdio.h> diff --git a/lib/asan/lit_tests/Linux/preinit_test.cc b/lib/asan/lit_tests/Linux/preinit_test.cc new file mode 100644 index 000000000000..28e509472c0c --- /dev/null +++ b/lib/asan/lit_tests/Linux/preinit_test.cc @@ -0,0 +1,27 @@ +// RUN: %clangxx -DFUNC=zzzz %s -shared -o %t.so -fPIC +// RUN: %clangxx_asan -DFUNC=main %s -o %t -Wl,-R. %t.so +// RUN: %t + +// This test ensures that we call __asan_init early enough. +// We build a shared library w/o asan instrumentation +// and the binary with asan instrumentation. +// Both files include the same header (emulated by -DFUNC here) +// with C++ template magic which runs global initializer at library load time. +// The function get() is instrumented with asan, but called +// before the usual constructors are run. +// So, we must make sure that __asan_init is executed even earlier. +// +// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=56393 + +struct A { + int foo() const { return 0; } +}; +A get () { return A(); } +template <class> struct O { + static A const e; +}; +template <class T> A const O <T>::e = get(); +int FUNC() { + return O<int>::e.foo(); +} + diff --git a/lib/asan/lit_tests/Linux/rlimit_mmap_test.cc b/lib/asan/lit_tests/Linux/rlimit_mmap_test.cc index 5026e24e424d..86794756c76f 100644 --- a/lib/asan/lit_tests/Linux/rlimit_mmap_test.cc +++ b/lib/asan/lit_tests/Linux/rlimit_mmap_test.cc @@ -11,6 +11,6 @@ int main(int argc, char **argv) { struct rlimit mmap_resource_limit = { 0, 0 }; assert(0 == setrlimit(RLIMIT_AS, &mmap_resource_limit)); x = malloc(10000000); -// CHECK: AddressSanitizer is unable to mmap +// CHECK: ERROR: Failed to mmap return 0; } diff --git a/lib/asan/lit_tests/Linux/swapcontext_test.cc b/lib/asan/lit_tests/Linux/swapcontext_test.cc index 0404b4f602bd..47a8d9891f51 100644 --- a/lib/asan/lit_tests/Linux/swapcontext_test.cc +++ b/lib/asan/lit_tests/Linux/swapcontext_test.cc @@ -8,6 +8,9 @@ // RUN: %clangxx_asan -m32 -O1 %s -o %t && %t 2>&1 | FileCheck %s // RUN: %clangxx_asan -m32 -O2 %s -o %t && %t 2>&1 | FileCheck %s // RUN: %clangxx_asan -m32 -O3 %s -o %t && %t 2>&1 | FileCheck %s +// +// This test is too sublte to try on non-x86 arch for now. +// REQUIRES: x86_64-supported-target,i386-supported-target #include <stdio.h> #include <ucontext.h> @@ -16,9 +19,26 @@ ucontext_t orig_context; ucontext_t child_context; +const int kStackSize = 1 << 20; + +__attribute__((noinline)) +void Throw() { + throw 1; +} + +__attribute__((noinline)) +void ThrowAndCatch() { + try { + Throw(); + } catch(int a) { + printf("ThrowAndCatch: %d\n", a); + } +} + void Child(int mode) { char x[32] = {0}; // Stack gets poisoned. printf("Child: %p\n", x); + ThrowAndCatch(); // Simulate __asan_handle_no_return(). // (a) Do nothing, just return to parent function. // (b) Jump into the original function. Stack remains poisoned unless we do // something. @@ -30,9 +50,7 @@ void Child(int mode) { } } -int Run(int arg, int mode) { - const int kStackSize = 1 << 20; - char child_stack[kStackSize + 1]; +int Run(int arg, int mode, char *child_stack) { printf("Child stack: %p\n", child_stack); // Setup child context. 
getcontext(&child_context); @@ -54,13 +72,23 @@ int Run(int arg, int mode) { } int main(int argc, char **argv) { + char stack[kStackSize + 1]; // CHECK: WARNING: ASan doesn't fully support makecontext/swapcontext int ret = 0; - ret += Run(argc - 1, 0); + ret += Run(argc - 1, 0, stack); printf("Test1 passed\n"); // CHECK: Test1 passed - ret += Run(argc - 1, 1); + ret += Run(argc - 1, 1, stack); printf("Test2 passed\n"); // CHECK: Test2 passed + char *heap = new char[kStackSize + 1]; + ret += Run(argc - 1, 0, heap); + printf("Test3 passed\n"); + // CHECK: Test3 passed + ret += Run(argc - 1, 1, heap); + printf("Test4 passed\n"); + // CHECK: Test4 passed + + delete [] heap; return ret; } diff --git a/lib/asan/lit_tests/Linux/syscalls.cc b/lib/asan/lit_tests/Linux/syscalls.cc new file mode 100644 index 000000000000..b2edcfb92375 --- /dev/null +++ b/lib/asan/lit_tests/Linux/syscalls.cc @@ -0,0 +1,22 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +#include <assert.h> +#include <errno.h> +#include <glob.h> +#include <stdio.h> +#include <string.h> + +#include <sanitizer/linux_syscall_hooks.h> + +/* Test the presence of __sanitizer_syscall_ in the tool runtime, and general + sanity of their behaviour. */ + +int main(int argc, char *argv[]) { + char buf[1000]; + __sanitizer_syscall_pre_recvmsg(0, buf - 1, 0); + // CHECK: AddressSanitizer: stack-buffer-{{.*}}erflow + // CHECK: READ of size {{.*}} at {{.*}} thread T0 + // CHECK: #0 {{.*}} in __sanitizer_syscall_pre_recvmsg + return 0; +} diff --git a/lib/asan/lit_tests/Linux/time_null_regtest.cc b/lib/asan/lit_tests/Linux/time_null_regtest.cc new file mode 100644 index 000000000000..975bca3d105a --- /dev/null +++ b/lib/asan/lit_tests/Linux/time_null_regtest.cc @@ -0,0 +1,20 @@ +// RUN: %clangxx_asan -m64 -O0 %s -fsanitize-address-zero-base-shadow -pie -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// Zero-base shadow only works on x86_64 and i386. +// REQUIRES: x86_64-supported-target + +// A regression test for time(NULL), which caused ASan to crash in the +// zero-based shadow mode on Linux. +// FIXME: this test does not work on Darwin, because the code pages of the +// executable interleave with the zero-based shadow. 
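// For reference, the zero-based shadow mode only changes the constant offset
// in ASan's address-to-shadow mapping. Simplified sketch (constants are
// illustrative, not taken from this commit):

#include <stdint.h>
#include <stdio.h>

// Eight application bytes map to one shadow byte: shadow = (addr >> 3) + offset.
// With -fsanitize-address-zero-base-shadow the offset is 0, which is why the
// binary must be PIE and why low code pages can clash with the shadow.
static const uintptr_t kShadowScale = 3;
static const uintptr_t kZeroBaseOffset = 0;
static const uintptr_t kTypicalLinux64Offset = 0x7fff8000;

static uintptr_t MemToShadow(uintptr_t addr, uintptr_t offset) {
  return (addr >> kShadowScale) + offset;
}

int main() {
  uintptr_t addr = 0x602000000010;  // an arbitrary example address
  printf("zero-base shadow:      %#lx\n",
         (unsigned long)MemToShadow(addr, kZeroBaseOffset));
  printf("typical 64-bit shadow: %#lx\n",
         (unsigned long)MemToShadow(addr, kTypicalLinux64Offset));
  return 0;
}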
+ +#include <stdio.h> +#include <stdlib.h> +#include <time.h> + +int main() { + time_t t = time(NULL); + fprintf(stderr, "Time: %s\n", ctime(&t)); // NOLINT + // CHECK: {{Time: .* .* .*}} + return 0; +} diff --git a/lib/asan/lit_tests/Linux/zero-base-shadow.cc b/lib/asan/lit_tests/Linux/zero-base-shadow.cc new file mode 100644 index 000000000000..682e7e8d7e59 --- /dev/null +++ b/lib/asan/lit_tests/Linux/zero-base-shadow.cc @@ -0,0 +1,31 @@ +// RUN: %clangxx_asan -m64 -O0 -fsanitize-address-zero-base-shadow -fPIE -pie %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-64 < %t.out +// RUN: %clangxx_asan -m64 -O1 -fsanitize-address-zero-base-shadow -fPIE -pie %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-64 < %t.out +// RUN: %clangxx_asan -m64 -O2 -fsanitize-address-zero-base-shadow -fPIE -pie %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-64 < %t.out +// RUN: %clangxx_asan -m32 -O0 -fsanitize-address-zero-base-shadow -fPIE -pie %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-32 < %t.out +// RUN: %clangxx_asan -m32 -O1 -fsanitize-address-zero-base-shadow -fPIE -pie %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-32 < %t.out +// RUN: %clangxx_asan -m32 -O2 -fsanitize-address-zero-base-shadow -fPIE -pie %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-32 < %t.out + +// Zero-base shadow only works on x86_64 and i386. +// REQUIRES: x86_64-supported-target,i386-supported-target + +#include <string.h> +int main(int argc, char **argv) { + char x[10]; + memset(x, 0, 10); + int res = x[argc * 10]; // BOOOM + // CHECK: {{READ of size 1 at 0x.* thread T0}} + // CHECK: {{ #0 0x.* in _?main .*zero-base-shadow.cc:}}[[@LINE-2]] + // CHECK: {{Address 0x.* is .* frame}} + // CHECK: main + + // Check that shadow for stack memory occupies lower part of address space. + // CHECK-64: =>0x0f + // CHECK-32: =>0x1 + return res; +} diff --git a/lib/asan/lit_tests/SharedLibs/darwin-dummy-shared-lib-so.cc b/lib/asan/lit_tests/SharedLibs/darwin-dummy-shared-lib-so.cc new file mode 100644 index 000000000000..5d939991476e --- /dev/null +++ b/lib/asan/lit_tests/SharedLibs/darwin-dummy-shared-lib-so.cc @@ -0,0 +1,13 @@ +//===----------- darwin-dummy-shared-lib-so.cc ------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +void foo() {} diff --git a/lib/asan/lit_tests/SharedLibs/init-order-dlopen-so.cc b/lib/asan/lit_tests/SharedLibs/init-order-dlopen-so.cc new file mode 100644 index 000000000000..20ef2d8a00bb --- /dev/null +++ b/lib/asan/lit_tests/SharedLibs/init-order-dlopen-so.cc @@ -0,0 +1,12 @@ +#include <stdio.h> +#include <unistd.h> + +void inc_global(); + +int slow_init() { + sleep(1); + inc_global(); + return 42; +} + +int slowly_init_glob = slow_init(); diff --git a/lib/asan/lit_tests/Unit/lit.cfg b/lib/asan/lit_tests/Unit/lit.cfg index 243eb7fbeec0..e24361b014e9 100644 --- a/lib/asan/lit_tests/Unit/lit.cfg +++ b/lib/asan/lit_tests/Unit/lit.cfg @@ -11,9 +11,8 @@ def get_required_attr(config, attr_name): return attr_value # Setup attributes common for all compiler-rt projects. -llvm_src_root = get_required_attr(config, 'llvm_src_root') -compiler_rt_lit_unit_cfg = os.path.join(llvm_src_root, "projects", - "compiler-rt", "lib", +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib", "lit.common.unit.cfg") lit.load_config(config, compiler_rt_lit_unit_cfg) diff --git a/lib/asan/lit_tests/Unit/lit.site.cfg.in b/lib/asan/lit_tests/Unit/lit.site.cfg.in index 401c3a8cc2eb..315d24d1ed09 100644 --- a/lib/asan/lit_tests/Unit/lit.site.cfg.in +++ b/lib/asan/lit_tests/Unit/lit.site.cfg.in @@ -3,8 +3,15 @@ config.target_triple = "@TARGET_TRIPLE@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" -config.build_type = "@CMAKE_BUILD_TYPE@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" +config.llvm_build_mode = "@LLVM_BUILD_MODE@" config.asan_binary_dir = "@ASAN_BINARY_DIR@" +try: + config.llvm_build_mode = config.llvm_build_mode % lit.params +except KeyError,e: + key, = e.args + lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) + # Let the main config do the real work. lit.load_config(config, "@ASAN_SOURCE_DIR@/lit_tests/Unit/lit.cfg") diff --git a/lib/asan/lit_tests/allow_user_segv.cc b/lib/asan/lit_tests/allow_user_segv.cc new file mode 100644 index 000000000000..f8aed0d4ca80 --- /dev/null +++ b/lib/asan/lit_tests/allow_user_segv.cc @@ -0,0 +1,50 @@ +// Regression test for +// https://code.google.com/p/address-sanitizer/issues/detail?id=180 + +// RUN: %clangxx_asan -m64 -O0 %s -o %t && ASAN_OPTIONS=allow_user_segv_handler=true %t 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m64 -O2 %s -o %t && ASAN_OPTIONS=allow_user_segv_handler=true %t 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m32 -O0 %s -o %t && ASAN_OPTIONS=allow_user_segv_handler=true %t 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m32 -O2 %s -o %t && ASAN_OPTIONS=allow_user_segv_handler=true %t 2>&1 | FileCheck %s + +#include <signal.h> +#include <stdio.h> + +struct sigaction user_sigaction; +struct sigaction original_sigaction; + +void User_OnSIGSEGV(int signum, siginfo_t *siginfo, void *context) { + fprintf(stderr, "User sigaction called\n"); + if (original_sigaction.sa_flags | SA_SIGINFO) + original_sigaction.sa_sigaction(signum, siginfo, context); + else + original_sigaction.sa_handler(signum); +} + +int DoSEGV() { + volatile int *x = 0; + return *x; +} + +int main() { + user_sigaction.sa_sigaction = User_OnSIGSEGV; + user_sigaction.sa_flags = SA_SIGINFO; +#if defined(__APPLE__) && !defined(__LP64__) + // On 32-bit Darwin KERN_PROTECTION_FAILURE (SIGBUS) is delivered. 
+ int signum = SIGBUS; +#else + // On 64-bit Darwin KERN_INVALID_ADDRESS (SIGSEGV) is delivered. + // On Linux SIGSEGV is delivered as well. + int signum = SIGSEGV; +#endif + if (sigaction(signum, &user_sigaction, &original_sigaction)) { + perror("sigaction"); + return 1; + } + fprintf(stderr, "User sigaction installed\n"); + return DoSEGV(); +} + +// CHECK: User sigaction installed +// CHECK-NEXT: User sigaction called +// CHECK-NEXT: ASAN:SIGSEGV +// CHECK: AddressSanitizer: SEGV on unknown address diff --git a/lib/asan/lit_tests/default_blacklist.cc b/lib/asan/lit_tests/default_blacklist.cc new file mode 100644 index 000000000000..25a1ae1752b0 --- /dev/null +++ b/lib/asan/lit_tests/default_blacklist.cc @@ -0,0 +1,3 @@ +// Test that ASan uses the default blacklist from resource directory. +// RUN: %clangxx_asan -### %s 2>&1 | FileCheck %s +// CHECK: fsanitize-blacklist={{.*}}asan_blacklist.txt diff --git a/lib/asan/lit_tests/default_options.cc b/lib/asan/lit_tests/default_options.cc index 950a7d879194..84b80557b852 100644 --- a/lib/asan/lit_tests/default_options.cc +++ b/lib/asan/lit_tests/default_options.cc @@ -4,7 +4,7 @@ const char *kAsanDefaultOptions="verbosity=1 foo=bar"; extern "C" -__attribute__((no_address_safety_analysis)) +__attribute__((no_sanitize_address)) const char *__asan_default_options() { // CHECK: Using the defaults from __asan_default_options: {{.*}} foo=bar return kAsanDefaultOptions; diff --git a/lib/asan/lit_tests/dlclose-test.cc b/lib/asan/lit_tests/dlclose-test.cc index 229f508294bf..b15895bf3579 100644 --- a/lib/asan/lit_tests/dlclose-test.cc +++ b/lib/asan/lit_tests/dlclose-test.cc @@ -9,6 +9,11 @@ // are globals. // 6. BOOM +// This sublte test assumes that after a foo.so is dlclose-d +// we can mmap the region of memory that has been occupied by the library. +// It works on i368/x86_64 Linux, but not necessary anywhere else. +// REQUIRES: x86_64-supported-target,i386-supported-target + // RUN: %clangxx_asan -m64 -O0 %p/SharedLibs/dlclose-test-so.cc \ // RUN: -fPIC -shared -o %t-so.so // RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | FileCheck %s @@ -39,17 +44,17 @@ #include <stdio.h> #include <string.h> #include <sys/mman.h> +#include <unistd.h> #include <string> using std::string; -static const int kPageSize = 4096; - typedef int *(fun_t)(); int main(int argc, char *argv[]) { string path = string(argv[0]) + "-so.so"; + size_t PageSize = sysconf(_SC_PAGESIZE); printf("opening %s ... \n", path.c_str()); void *lib = dlopen(path.c_str(), RTLD_NOW); if (!lib) { @@ -73,8 +78,8 @@ int main(int argc, char *argv[]) { return 1; } // Now, the page where 'addr' is unmapped. Map it. 
- size_t page_beg = ((size_t)addr) & ~(kPageSize - 1); - void *res = mmap((void*)(page_beg), kPageSize, + size_t page_beg = ((size_t)addr) & ~(PageSize - 1); + void *res = mmap((void*)(page_beg), PageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, 0, 0); if (res == (char*)-1L) { diff --git a/lib/asan/lit_tests/double-free.cc b/lib/asan/lit_tests/double-free.cc new file mode 100644 index 000000000000..9e201117c563 --- /dev/null +++ b/lib/asan/lit_tests/double-free.cc @@ -0,0 +1,18 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +#include <stdlib.h> +#include <string.h> +int main(int argc, char **argv) { + char *x = (char*)malloc(10 * sizeof(char)); + memset(x, 0, 10); + int res = x[argc]; + free(x); + free(x + argc - 1); // BOOM + // CHECK: AddressSanitizer: attempting double-free{{.*}}in thread T0 + // CHECK: double-free.cc:[[@LINE-2]] + // CHECK: freed by thread T0 here: + // CHECK: double-free.cc:[[@LINE-5]] + // CHECK: allocated by thread T0 here: + // CHECK: double-free.cc:[[@LINE-10]] + return res; +} diff --git a/lib/asan/lit_tests/global-demangle.cc b/lib/asan/lit_tests/global-demangle.cc new file mode 100644 index 000000000000..5696a38a7705 --- /dev/null +++ b/lib/asan/lit_tests/global-demangle.cc @@ -0,0 +1,18 @@ +// Don't run through %symbolize to avoid c++filt demangling. +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | FileCheck %s + +namespace XXX { +class YYY { + public: + static char ZZZ[]; +}; +char YYY::ZZZ[] = "abc"; +} + +int main(int argc, char **argv) { + return (int)XXX::YYY::ZZZ[argc + 5]; // BOOM + // CHECK: {{READ of size 1 at 0x.*}} + // CHECK: {{0x.* is located 2 bytes to the right of global variable}} + // CHECK: 'XXX::YYY::ZZZ' {{.*}} of size 4 + // CHECK: 'XXX::YYY::ZZZ' is ascii string 'abc' +} diff --git a/lib/asan/lit_tests/heap-overflow.cc b/lib/asan/lit_tests/heap-overflow.cc index 2648ec7e5f1f..f1d719cd0b20 100644 --- a/lib/asan/lit_tests/heap-overflow.cc +++ b/lib/asan/lit_tests/heap-overflow.cc @@ -29,10 +29,8 @@ int main(int argc, char **argv) { // CHECK-Linux: {{ #0 0x.* in .*malloc}} // CHECK-Linux: {{ #1 0x.* in main .*heap-overflow.cc:21}} - // CHECK-Darwin: {{ #0 0x.* in .*mz_malloc.*}} - // CHECK-Darwin: {{ #1 0x.* in malloc_zone_malloc.*}} - // CHECK-Darwin: {{ #2 0x.* in malloc.*}} - // CHECK-Darwin: {{ #3 0x.* in _?main .*heap-overflow.cc:21}} + // CHECK-Darwin: {{ #0 0x.* in _?wrap_malloc.*}} + // CHECK-Darwin: {{ #1 0x.* in _?main .*heap-overflow.cc:21}} free(x); return res; } diff --git a/lib/asan/lit_tests/huge_negative_hea_oob.cc b/lib/asan/lit_tests/huge_negative_hea_oob.cc new file mode 100644 index 000000000000..a09e3bf87d60 --- /dev/null +++ b/lib/asan/lit_tests/huge_negative_hea_oob.cc @@ -0,0 +1,13 @@ +// RUN: %clangxx_asan -m64 %s -o %t && %t 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m64 -O %s -o %t && %t 2>&1 | FileCheck %s +// Check that we can find huge buffer overflows to the left. +#include <stdlib.h> +#include <string.h> +int main(int argc, char **argv) { + char *x = (char*)malloc(1 << 20); + memset(x, 0, 10); + int res = x[-argc * 4000]; // BOOOM + // CHECK: is located 4000 bytes to the left of + free(x); + return res; +} diff --git a/lib/asan/lit_tests/init-order-atexit.cc b/lib/asan/lit_tests/init-order-atexit.cc new file mode 100644 index 000000000000..45f4f17c0cb0 --- /dev/null +++ b/lib/asan/lit_tests/init-order-atexit.cc @@ -0,0 +1,31 @@ +// Test for the following situation: +// (1) global A is constructed. 
+// (2) exit() is called during construction of global B. +// (3) destructor of A reads uninitialized global C from another module. +// We do *not* want to report init-order bug in this case. + +// RUN: %clangxx_asan -m64 -O0 %s %p/Helpers/init-order-atexit-extra.cc -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true:strict_init_order=true %t 2>&1 | FileCheck %s + +#include <stdio.h> +#include <stdlib.h> + +void AccessC(); + +class A { + public: + A() { } + ~A() { AccessC(); printf("PASSED\n"); } + // CHECK-NOT: AddressSanitizer + // CHECK: PASSED +}; + +A a; + +class B { + public: + B() { exit(1); } + ~B() { } +}; + +B b; diff --git a/lib/asan/lit_tests/init-order-dlopen.cc b/lib/asan/lit_tests/init-order-dlopen.cc new file mode 100644 index 000000000000..228f44204c99 --- /dev/null +++ b/lib/asan/lit_tests/init-order-dlopen.cc @@ -0,0 +1,52 @@ +// Regression test for +// https://code.google.com/p/address-sanitizer/issues/detail?id=178 + +// RUN: %clangxx_asan -m64 -O0 %p/SharedLibs/init-order-dlopen-so.cc \ +// RUN: -fPIC -shared -o %t-so.so +// If the linker doesn't support --export-dynamic (which is ELF-specific), +// try to link without that option. +// FIXME: find a better solution. +// RUN: %clangxx_asan -m64 -O0 %s -o %t -Wl,--export-dynamic || \ +// RUN: %clangxx_asan -m64 -O0 %s -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true:strict_init_order=true %t 2>&1 | FileCheck %s +#include <dlfcn.h> +#include <pthread.h> +#include <stdio.h> +#include <unistd.h> + +#include <string> + +using std::string; + +int foo() { + return 42; +} +int global = foo(); + +__attribute__((visibility("default"))) +void inc_global() { + global++; +} + +void *global_poller(void *arg) { + while (true) { + if (global != 42) + break; + usleep(100); + } + return 0; +} + +int main(int argc, char *argv[]) { + pthread_t p; + pthread_create(&p, 0, global_poller, 0); + string path = string(argv[0]) + "-so.so"; + if (0 == dlopen(path.c_str(), RTLD_NOW)) { + fprintf(stderr, "dlerror: %s\n", dlerror()); + return 1; + } + pthread_join(p, 0); + printf("PASSED\n"); + // CHECK: PASSED + return 0; +} diff --git a/lib/asan/lit_tests/initialization-blacklist.cc b/lib/asan/lit_tests/initialization-blacklist.cc index f8df24c68ea6..12fbc49ed91b 100644 --- a/lib/asan/lit_tests/initialization-blacklist.cc +++ b/lib/asan/lit_tests/initialization-blacklist.cc @@ -1,23 +1,35 @@ // Test for blacklist functionality of initialization-order checker. 
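// The initialization-order bugs exercised by this test need two translation
// units; a minimal sketch of the pattern (hypothetical names, not part of this
// change). As written it is a single valid file; to actually trigger a report,
// split it at the marker, compile both parts with -fsanitize=address and run
// with ASAN_OPTIONS=check_initialization_order=true.

// --- provider.cc ---
int compute_value() { return 42; }
int provided = compute_value();                   // dynamically initialized

// --- consumer.cc ---
extern int provided;
static int read_provided() { return provided; }   // may run before provider.cc's initializers
int consumed = read_provided();
int main() { return 0; }

// If consumer.cc happens to be initialized first, ASan reports an
// initialization-order-fiasco; blacklist entries such as
// "global-init:*provided*" or "global-init-src:*provider.cc" (the syntax shown
// in the Helpers file above) suppress such reports.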
// RUN: %clangxx_asan -m64 -O0 %s %p/Helpers/initialization-blacklist-extra.cc\ +// RUN: %p/Helpers/initialization-blacklist-extra2.cc \ // RUN: -fsanitize-blacklist=%p/Helpers/initialization-blacklist.txt \ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 +// RUN: -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 // RUN: %clangxx_asan -m64 -O1 %s %p/Helpers/initialization-blacklist-extra.cc\ +// RUN: %p/Helpers/initialization-blacklist-extra2.cc \ // RUN: -fsanitize-blacklist=%p/Helpers/initialization-blacklist.txt \ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 +// RUN: -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 // RUN: %clangxx_asan -m64 -O2 %s %p/Helpers/initialization-blacklist-extra.cc\ +// RUN: %p/Helpers/initialization-blacklist-extra2.cc \ // RUN: -fsanitize-blacklist=%p/Helpers/initialization-blacklist.txt \ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 +// RUN: -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 // RUN: %clangxx_asan -m32 -O0 %s %p/Helpers/initialization-blacklist-extra.cc\ +// RUN: %p/Helpers/initialization-blacklist-extra2.cc \ // RUN: -fsanitize-blacklist=%p/Helpers/initialization-blacklist.txt \ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 +// RUN: -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 // RUN: %clangxx_asan -m32 -O1 %s %p/Helpers/initialization-blacklist-extra.cc\ +// RUN: %p/Helpers/initialization-blacklist-extra2.cc \ // RUN: -fsanitize-blacklist=%p/Helpers/initialization-blacklist.txt \ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 +// RUN: -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 // RUN: %clangxx_asan -m32 -O2 %s %p/Helpers/initialization-blacklist-extra.cc\ +// RUN: %p/Helpers/initialization-blacklist-extra2.cc \ // RUN: -fsanitize-blacklist=%p/Helpers/initialization-blacklist.txt \ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 +// RUN: -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 // Function is defined in another TU. int readBadGlobal(); @@ -27,6 +39,9 @@ int x = readBadGlobal(); // init-order bug. int accessBadObject(); int y = accessBadObject(); // init-order bug. +int readBadSrcGlobal(); +int z = readBadSrcGlobal(); // init-order bug. + int main(int argc, char **argv) { - return argc + x + y - 1; + return argc + x + y + z - 1; } diff --git a/lib/asan/lit_tests/initialization-bug.cc b/lib/asan/lit_tests/initialization-bug.cc index 8f4e33ef5a35..ee2c725f0b13 100644 --- a/lib/asan/lit_tests/initialization-bug.cc +++ b/lib/asan/lit_tests/initialization-bug.cc @@ -1,14 +1,17 @@ // Test to make sure basic initialization order errors are caught. -// RUN: %clangxx_asan -m64 -O0 %s %p/Helpers/initialization-bug-extra2.cc\ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 \ +// RUN: %clangxx_asan -m64 -O0 %s %p/Helpers/initialization-bug-extra2.cc -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 \ // RUN: | %symbolize | FileCheck %s -// RUN: %clangxx_asan -m32 -O0 %s %p/Helpers/initialization-bug-extra2.cc\ -// RUN: -fsanitize=init-order -o %t && %t 2>&1 \ +// RUN: %clangxx_asan -m32 -O0 %s %p/Helpers/initialization-bug-extra2.cc -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 \ // RUN: | %symbolize | FileCheck %s // Do not test with optimization -- the error may be optimized away. 
+// FIXME: https://code.google.com/p/address-sanitizer/issues/detail?id=186 +// XFAIL: darwin + #include <cstdio> // The structure of the test is: diff --git a/lib/asan/lit_tests/initialization-constexpr.cc b/lib/asan/lit_tests/initialization-constexpr.cc new file mode 100644 index 000000000000..ba5410674f76 --- /dev/null +++ b/lib/asan/lit_tests/initialization-constexpr.cc @@ -0,0 +1,43 @@ +// Constexpr: +// We need to check that a global variable initialized with a constexpr +// constructor can be accessed during dynamic initialization (as a constexpr +// constructor implies that it was initialized during constant initialization, +// not dynamic initialization). + +// RUN: %clangxx_asan -m64 -O0 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m64 -O1 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m64 -O2 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m64 -O3 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O0 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O1 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O2 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O3 %s %p/Helpers/initialization-constexpr-extra.cc\ +// RUN: --std=c++11 -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 + +class Integer { + private: + int value; + + public: + constexpr Integer(int x = 0) : value(x) {} + int getValue() {return value;} +}; +Integer coolestInteger(42); +int getCoolestInteger() { return coolestInteger.getValue(); } + +int main() { return 0; } diff --git a/lib/asan/lit_tests/initialization-nobug.cc b/lib/asan/lit_tests/initialization-nobug.cc index 1b8961606811..407226e29a1b 100644 --- a/lib/asan/lit_tests/initialization-nobug.cc +++ b/lib/asan/lit_tests/initialization-nobug.cc @@ -1,24 +1,22 @@ // A collection of various initializers which shouldn't trip up initialization // order checking. If successful, this will just return 0. 
-// RUN: %clangxx_asan -m64 -O0 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m64 -O1 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m64 -O2 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m64 -O3 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m32 -O0 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m32 -O0 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m32 -O1 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m32 -O2 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 -// RUN: %clangxx_asan -m32 -O3 %s %p/Helpers/initialization-nobug-extra.cc\ -// RUN: --std=c++11 -fsanitize=init-order -o %t && %t 2>&1 +// RUN: %clangxx_asan -m64 -O0 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m64 -O1 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m64 -O2 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m64 -O3 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O0 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O1 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O2 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 +// RUN: %clangxx_asan -m32 -O3 %s %p/Helpers/initialization-nobug-extra.cc -fsanitize=init-order -o %t +// RUN: ASAN_OPTIONS=check_initialization_order=true %t 2>&1 // Simple access: // Make sure that accessing a global in the same TU is safe @@ -47,21 +45,12 @@ int countCalls() { return ++calls; } -// Constexpr: -// We need to check that a global variable initialized with a constexpr -// constructor can be accessed during dynamic initialization (as a constexpr -// constructor implies that it was initialized during constant initialization, -// not dynamic initialization). - -class Integer { - private: +// Trivial constructor, non-trivial destructor. 
+struct StructWithDtor { + ~StructWithDtor() { } int value; - - public: - constexpr Integer(int x = 0) : value(x) {} - int getValue() {return value;} }; -Integer coolestInteger(42); -int getCoolestInteger() { return coolestInteger.getValue(); } +StructWithDtor struct_with_dtor; +int getStructWithDtorValue() { return struct_with_dtor.value; } int main() { return 0; } diff --git a/lib/asan/lit_tests/interface_test.cc b/lib/asan/lit_tests/interface_test.cc new file mode 100644 index 000000000000..428a109fe70d --- /dev/null +++ b/lib/asan/lit_tests/interface_test.cc @@ -0,0 +1,8 @@ +// Check that user may include ASan interface header. +// RUN: %clang -fsanitize=address -I %p/../../../include %s -o %t && %t +// RUN: %clang -I %p/../../../include %s -o %t && %t +#include <sanitizer/asan_interface.h> + +int main() { + return 0; +} diff --git a/lib/asan/lit_tests/invalid-free.cc b/lib/asan/lit_tests/invalid-free.cc new file mode 100644 index 000000000000..0ef064056b63 --- /dev/null +++ b/lib/asan/lit_tests/invalid-free.cc @@ -0,0 +1,16 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +#include <stdlib.h> +#include <string.h> +int main(int argc, char **argv) { + char *x = (char*)malloc(10 * sizeof(char)); + memset(x, 0, 10); + int res = x[argc]; + free(x + 5); // BOOM + // CHECK: AddressSanitizer: attempting free on address{{.*}}in thread T0 + // CHECK: invalid-free.cc:[[@LINE-2]] + // CHECK: is located 5 bytes inside of 10-byte region + // CHECK: allocated by thread T0 here: + // CHECK: invalid-free.cc:[[@LINE-8]] + return res; +} diff --git a/lib/asan/lit_tests/large_func_test.cc b/lib/asan/lit_tests/large_func_test.cc index a74828811f74..ceecc29b7b0a 100644 --- a/lib/asan/lit_tests/large_func_test.cc +++ b/lib/asan/lit_tests/large_func_test.cc @@ -32,7 +32,7 @@ static void LargeFunction(int *x, int zero) { // CHECK: {{.*ERROR: AddressSanitizer: heap-buffer-overflow on address}} // CHECK: {{0x.* at pc 0x.* bp 0x.* sp 0x.*}} // CHECK: {{READ of size 4 at 0x.* thread T0}} - x[zero + 111]++; // we should report this exact line + x[zero + 103]++; // we should report this exact line // atos incorrectly extracts the symbol name for the static functions on // Darwin. // CHECK-Linux: {{#0 0x.* in LargeFunction.*large_func_test.cc:}}[[@LINE-3]] @@ -54,9 +54,10 @@ int main(int argc, char **argv) { int *x = new int[100]; LargeFunction(x, argc - 1); // CHECK: {{ #1 0x.* in _?main .*large_func_test.cc:}}[[@LINE-1]] - // CHECK: {{0x.* is located 44 bytes to the right of 400-byte region}} + // CHECK: {{0x.* is located 12 bytes to the right of 400-byte region}} // CHECK: {{allocated by thread T0 here:}} - // CHECK: {{ #0 0x.* in operator new.*}} - // CHECK: {{ #1 0x.* in _?main .*large_func_test.cc:}}[[@LINE-6]] + // CHECK-Linux: {{ #0 0x.* in operator new.*}} + // CHECK-Darwin: {{ #0 0x.* in .*_Zna.*}} + // CHECK: {{ #1 0x.* in _?main .*large_func_test.cc:}}[[@LINE-7]] delete x; } diff --git a/lib/asan/lit_tests/lit.cfg b/lib/asan/lit_tests/lit.cfg index 7875281b1f2f..5daecd9e557d 100644 --- a/lib/asan/lit_tests/lit.cfg +++ b/lib/asan/lit_tests/lit.cfg @@ -2,6 +2,14 @@ import os +def get_required_attr(config, attr_name): + attr_value = getattr(config, attr_name, None) + if not attr_value: + lit.fatal("No attribute %r in test configuration! You may need to run " + "tests from your build directory or add this attribute " + "to lit.site.cfg " % attr_name) + return attr_value + # Setup config name. 
config.name = 'AddressSanitizer' @@ -30,14 +38,6 @@ if llvm_src_root is None: if not llvm_config: DisplayNoConfigMessage() - # Validate that llvm-config points to the same source tree. - llvm_src_root = lit.util.capture(["llvm-config", "--src-root"]).strip() - asan_test_src_root = os.path.join(llvm_src_root, "projects", "compiler-rt", - "lib", "asan", "lit_tests") - if (os.path.realpath(asan_test_src_root) != - os.path.realpath(config.test_source_root)): - DisplayNoConfigMessage() - # Find out the presumed location of generated site config. llvm_obj_root = lit.util.capture(["llvm-config", "--obj-root"]).strip() asan_site_cfg = os.path.join(llvm_obj_root, "projects", "compiler-rt", @@ -49,8 +49,9 @@ if llvm_src_root is None: raise SystemExit # Setup attributes common for all compiler-rt projects. -compiler_rt_lit_cfg = os.path.join(llvm_src_root, "projects", "compiler-rt", - "lib", "lit.common.cfg") +compiler_rt_src_root = get_required_attr(config, "compiler_rt_src_root") +compiler_rt_lit_cfg = os.path.join(compiler_rt_src_root, "lib", + "lit.common.cfg") if (not compiler_rt_lit_cfg) or (not os.path.exists(compiler_rt_lit_cfg)): lit.fatal("Can't find common compiler-rt lit config at: %r" % compiler_rt_lit_cfg) diff --git a/lib/asan/lit_tests/lit.site.cfg.in b/lib/asan/lit_tests/lit.site.cfg.in index cf439309c6ad..08546cdabe02 100644 --- a/lib/asan/lit_tests/lit.site.cfg.in +++ b/lib/asan/lit_tests/lit.site.cfg.in @@ -5,8 +5,10 @@ config.target_triple = "@TARGET_TRIPLE@" config.host_os = "@HOST_OS@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" config.llvm_obj_root = "@LLVM_BINARY_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" config.clang = "@LLVM_BINARY_DIR@/bin/clang" +config.compiler_rt_arch = "@COMPILER_RT_SUPPORTED_ARCH@" # LLVM tools dir can be passed in lit parameters, so try to # apply substitution. diff --git a/lib/asan/lit_tests/log_path_fork_test.cc b/lib/asan/lit_tests/log_path_fork_test.cc.disabled index c6c1b49e994d..c6c1b49e994d 100644 --- a/lib/asan/lit_tests/log_path_fork_test.cc +++ b/lib/asan/lit_tests/log_path_fork_test.cc.disabled diff --git a/lib/asan/lit_tests/malloc_fill.cc b/lib/asan/lit_tests/malloc_fill.cc new file mode 100644 index 000000000000..c23516b33299 --- /dev/null +++ b/lib/asan/lit_tests/malloc_fill.cc @@ -0,0 +1,22 @@ +// Check that we fill malloc-ed memory correctly. +// RUN: %clangxx_asan -m64 %s -o %t +// RUN: %t | FileCheck %s +// RUN: ASAN_OPTIONS=max_malloc_fill_size=10:malloc_fill_byte=8 %t | FileCheck %s --check-prefix=CHECK-10-8 +// RUN: ASAN_OPTIONS=max_malloc_fill_size=20:malloc_fill_byte=171 %t | FileCheck %s --check-prefix=CHECK-20-ab + +#include <stdio.h> +int main(int argc, char **argv) { + // With asan allocator this makes sure we get memory from mmap. 
+ static const int kSize = 1 << 25; + unsigned char *x = new unsigned char[kSize]; + printf("-"); + for (int i = 0; i <= 32; i++) { + printf("%02x", x[i]); + } + printf("-\n"); + delete [] x; +} + +// CHECK: -bebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebe- +// CHECK-10-8: -080808080808080808080000000000000000000000000000000000000000000000- +// CHECK-20-ab: -abababababababababababababababababababab00000000000000000000000000- diff --git a/lib/asan/lit_tests/memcmp_strict_test.cc b/lib/asan/lit_tests/memcmp_strict_test.cc new file mode 100644 index 000000000000..00bf921c744a --- /dev/null +++ b/lib/asan/lit_tests/memcmp_strict_test.cc @@ -0,0 +1,16 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && ASAN_OPTIONS=strict_memcmp=0 %t 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK-nonstrict +// RUN: %clangxx_asan -m64 -O0 %s -o %t && ASAN_OPTIONS=strict_memcmp=1 %t 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK-strict +// Default to strict_memcmp=1. +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK-strict + +#include <stdio.h> +#include <string.h> +int main() { + char kFoo[] = "foo"; + char kFubar[] = "fubar"; + int res = memcmp(kFoo, kFubar, strlen(kFubar)); + printf("res: %d\n", res); + // CHECK-nonstrict: {{res: -1}} + // CHECK-strict: AddressSanitizer: stack-buffer-overflow + return 0; +} diff --git a/lib/asan/lit_tests/partial_right.cc b/lib/asan/lit_tests/partial_right.cc new file mode 100644 index 000000000000..c579262726f9 --- /dev/null +++ b/lib/asan/lit_tests/partial_right.cc @@ -0,0 +1,17 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m64 -O1 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m64 -O2 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m32 -O1 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m32 -O2 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +#include <stdlib.h> +int main(int argc, char **argv) { + volatile int *x = (int*)malloc(2*sizeof(int) + 2); + int res = x[2]; // BOOOM + // CHECK: {{READ of size 4 at 0x.* thread T0}} + // CHECK: [[ADDR:0x[01-9a-fa-f]+]] is located 0 bytes to the right of {{.*}}-byte region [{{.*}},{{.*}}[[ADDR]]) + return res; +} diff --git a/lib/asan/lit_tests/stack-frame-demangle.cc b/lib/asan/lit_tests/stack-frame-demangle.cc index 7f4d59fc5838..bb8de16b2b8a 100644 --- a/lib/asan/lit_tests/stack-frame-demangle.cc +++ b/lib/asan/lit_tests/stack-frame-demangle.cc @@ -1,7 +1,4 @@ -// Check that ASan is able to print demangled frame name even w/o -// symbolization. 
- -// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | FileCheck %s +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s #include <string.h> @@ -11,12 +8,13 @@ struct YYY { char array[10]; memset(array, 0, 10); return array[x]; // BOOOM - // CHECK: {{ERROR: AddressSanitizer: stack-buffer-overflow}} - // CHECK: {{READ of size 1 at 0x.* thread T0}} - // CHECK: {{Address 0x.* is .* frame <XXX::YYY::ZZZ(.*)>}} + // CHECK: ERROR: AddressSanitizer: stack-buffer-overflow + // CHECK: READ of size 1 at + // CHECK: is located in stack of thread T0 at offset + // CHECK: XXX::YYY::ZZZ } }; -}; +} // namespace XXX int main(int argc, char **argv) { int res = XXX::YYY::ZZZ(argc + 10); diff --git a/lib/asan/lit_tests/stack-oob-frames.cc b/lib/asan/lit_tests/stack-oob-frames.cc new file mode 100644 index 000000000000..0395522252e8 --- /dev/null +++ b/lib/asan/lit_tests/stack-oob-frames.cc @@ -0,0 +1,59 @@ +// RUN: %clangxx_asan -m64 -O1 %s -o %t +// RUN: %t 0 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK0 +// RUN: %t 1 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK1 +// RUN: %t 2 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK2 +// RUN: %t 3 2>&1 | %symbolize | FileCheck %s --check-prefix=CHECK3 + +#define NOINLINE __attribute__((noinline)) +inline void break_optimization(void *arg) { + __asm__ __volatile__("" : : "r" (arg) : "memory"); +} + +NOINLINE static void Frame0(int frame, char *a, char *b, char *c) { + char s[4] = {0}; + char *d = s; + break_optimization(&d); + switch (frame) { + case 3: a[5]++; break; + case 2: b[5]++; break; + case 1: c[5]++; break; + case 0: d[5]++; break; + } +} +NOINLINE static void Frame1(int frame, char *a, char *b) { + char c[4] = {0}; Frame0(frame, a, b, c); + break_optimization(0); +} +NOINLINE static void Frame2(int frame, char *a) { + char b[4] = {0}; Frame1(frame, a, b); + break_optimization(0); +} +NOINLINE static void Frame3(int frame) { + char a[4] = {0}; Frame2(frame, a); + break_optimization(0); +} + +int main(int argc, char **argv) { + if (argc != 2) return 1; + Frame3(argv[1][0] - '0'); +} + +// CHECK0: AddressSanitizer: stack-buffer-overflow +// CHECK0: #0{{.*}}Frame0 +// CHECK0: #1{{.*}}Frame1 +// CHECK0: #2{{.*}}Frame2 +// CHECK0: #3{{.*}}Frame3 +// CHECK0: is located in stack of thread T0 at offset +// CHECK0-NEXT: #0{{.*}}Frame0 +// +// CHECK1: AddressSanitizer: stack-buffer-overflow +// CHECK1: is located in stack of thread T0 at offset +// CHECK1-NEXT: #0{{.*}}Frame1 +// +// CHECK2: AddressSanitizer: stack-buffer-overflow +// CHECK2: is located in stack of thread T0 at offset +// CHECK2-NEXT: #0{{.*}}Frame2 +// +// CHECK3: AddressSanitizer: stack-buffer-overflow +// CHECK3: is located in stack of thread T0 at offset +// CHECK3-NEXT: #0{{.*}}Frame3 diff --git a/lib/asan/lit_tests/stack-overflow.cc b/lib/asan/lit_tests/stack-overflow.cc index 3deb1e91de6c..25ea43af48a4 100644 --- a/lib/asan/lit_tests/stack-overflow.cc +++ b/lib/asan/lit_tests/stack-overflow.cc @@ -14,6 +14,7 @@ int main(int argc, char **argv) { int res = x[argc * 10]; // BOOOM // CHECK: {{READ of size 1 at 0x.* thread T0}} // CHECK: {{ #0 0x.* in _?main .*stack-overflow.cc:}}[[@LINE-2]] - // CHECK: {{Address 0x.* is .* frame <main>}} + // CHECK: {{Address 0x.* is located in stack of thread T0 at offset}} + // CHECK-NEXT: in{{.*}}main{{.*}}stack-overflow.cc return res; } diff --git a/lib/asan/lit_tests/strncpy-overflow.cc b/lib/asan/lit_tests/strncpy-overflow.cc index 18711843c4c8..5133b5c1653e 100644 --- 
a/lib/asan/lit_tests/strncpy-overflow.cc +++ b/lib/asan/lit_tests/strncpy-overflow.cc @@ -22,7 +22,7 @@ int main(int argc, char **argv) { strcpy(hello, "hello"); char *short_buffer = (char*)malloc(9); strncpy(short_buffer, hello, 10); // BOOM - // CHECK: {{WRITE of size 1 at 0x.* thread T0}} + // CHECK: {{WRITE of size 10 at 0x.* thread T0}} // CHECK-Linux: {{ #0 0x.* in .*strncpy}} // CHECK-Darwin: {{ #0 0x.* in _?wrap_strncpy}} // CHECK: {{ #1 0x.* in _?main .*strncpy-overflow.cc:}}[[@LINE-4]] @@ -32,9 +32,7 @@ int main(int argc, char **argv) { // CHECK-Linux: {{ #0 0x.* in .*malloc}} // CHECK-Linux: {{ #1 0x.* in main .*strncpy-overflow.cc:}}[[@LINE-10]] - // CHECK-Darwin: {{ #0 0x.* in .*mz_malloc.*}} - // CHECK-Darwin: {{ #1 0x.* in malloc_zone_malloc.*}} - // CHECK-Darwin: {{ #2 0x.* in malloc.*}} - // CHECK-Darwin: {{ #3 0x.* in _?main .*strncpy-overflow.cc:}}[[@LINE-15]] + // CHECK-Darwin: {{ #0 0x.* in _?wrap_malloc.*}} + // CHECK-Darwin: {{ #1 0x.* in _?main .*strncpy-overflow.cc:}}[[@LINE-13]] return short_buffer[8]; } diff --git a/lib/asan/lit_tests/throw_call_test.cc b/lib/asan/lit_tests/throw_call_test.cc new file mode 100644 index 000000000000..974bc51d97c5 --- /dev/null +++ b/lib/asan/lit_tests/throw_call_test.cc @@ -0,0 +1,45 @@ +// RUN: %clangxx_asan %s -o %t && %t +// http://code.google.com/p/address-sanitizer/issues/detail?id=147 (not fixed). +// BROKEN: %clangxx_asan %s -o %t -static-libstdc++ && %t +#include <stdio.h> +static volatile int zero = 0; +inline void pretend_to_do_something(void *x) { + __asm__ __volatile__("" : : "r" (x) : "memory"); +} + +__attribute__((noinline, no_sanitize_address)) +void ReallyThrow() { + fprintf(stderr, "ReallyThrow\n"); + if (zero == 0) + throw 42; +} + +__attribute__((noinline)) +void Throw() { + int a, b, c, d, e; + pretend_to_do_something(&a); + pretend_to_do_something(&b); + pretend_to_do_something(&c); + pretend_to_do_something(&d); + pretend_to_do_something(&e); + fprintf(stderr, "Throw stack = %p\n", &a); + ReallyThrow(); +} + +__attribute__((noinline)) +void CheckStack() { + int ar[100]; + pretend_to_do_something(ar); + for (int i = 0; i < 100; i++) + ar[i] = i; + fprintf(stderr, "CheckStack stack = %p, %p\n", ar, ar + 100); +} + +int main(int argc, char** argv) { + try { + Throw(); + } catch(int a) { + fprintf(stderr, "a = %d\n", a); + } + CheckStack(); +} diff --git a/lib/asan/lit_tests/throw_invoke_test.cc b/lib/asan/lit_tests/throw_invoke_test.cc new file mode 100644 index 000000000000..077a940e8d19 --- /dev/null +++ b/lib/asan/lit_tests/throw_invoke_test.cc @@ -0,0 +1,50 @@ +// RUN: %clangxx_asan %s -o %t && %t +// RUN: %clangxx_asan %s -o %t -static-libstdc++ && %t +#include <stdio.h> +static volatile int zero = 0; +inline void pretend_to_do_something(void *x) { + __asm__ __volatile__("" : : "r" (x) : "memory"); +} + +__attribute__((noinline)) +void ReallyThrow() { + fprintf(stderr, "ReallyThrow\n"); + try { + if (zero == 0) + throw 42; + else if (zero == 1) + throw 1.; + } catch(double x) { + } +} + +__attribute__((noinline)) +void Throw() { + int a, b, c, d, e; + pretend_to_do_something(&a); + pretend_to_do_something(&b); + pretend_to_do_something(&c); + pretend_to_do_something(&d); + pretend_to_do_something(&e); + fprintf(stderr, "Throw stack = %p\n", &a); + ReallyThrow(); +} + +__attribute__((noinline)) +void CheckStack() { + int ar[100]; + pretend_to_do_something(ar); + for (int i = 0; i < 100; i++) + ar[i] = i; + fprintf(stderr, "CheckStack stack = %p, %p\n", ar, ar + 100); +} + +int main(int argc, char** argv) { 
+ try { + Throw(); + } catch(int a) { + fprintf(stderr, "a = %d\n", a); + } + CheckStack(); +} + diff --git a/lib/asan/lit_tests/time_interceptor.cc b/lib/asan/lit_tests/time_interceptor.cc new file mode 100644 index 000000000000..f5f2ad62b815 --- /dev/null +++ b/lib/asan/lit_tests/time_interceptor.cc @@ -0,0 +1,16 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// Test the time() interceptor. + +#include <stdio.h> +#include <stdlib.h> +#include <time.h> + +int main() { + time_t *tm = (time_t*)malloc(sizeof(time_t)); + free(tm); + time_t t = time(tm); + printf("Time: %s\n", ctime(&t)); // NOLINT + // CHECK: use-after-free + return 0; +} diff --git a/lib/asan/lit_tests/unaligned_loads_and_stores.cc b/lib/asan/lit_tests/unaligned_loads_and_stores.cc new file mode 100644 index 000000000000..bcae089b427b --- /dev/null +++ b/lib/asan/lit_tests/unaligned_loads_and_stores.cc @@ -0,0 +1,52 @@ +// RUN: %clangxx_asan -O0 -I %p/../../../include %s -o %t +// RUN: %t A 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-A %s +// RUN: %t B 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-B %s +// RUN: %t C 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-C %s +// RUN: %t D 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-D %s +// RUN: %t E 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-E %s + +// RUN: %t K 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-K %s +// RUN: %t L 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-L %s +// RUN: %t M 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-M %s +// RUN: %t N 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-N %s +// RUN: %t O 2>&1 | %symbolize | FileCheck --check-prefix=CHECK-O %s + +#include <sanitizer/asan_interface.h> + +#include <stdlib.h> +#include <string.h> +int main(int argc, char **argv) { + if (argc != 2) return 1; + char *x = new char[16]; + memset(x, 0xab, 16); + int res = 1; + switch (argv[1][0]) { + case 'A': res = __sanitizer_unaligned_load16(x + 15); break; +// CHECK-A ERROR: AddressSanitizer: heap-buffer-overflow on address +// CHECK-A: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-2]] +// CHECK-A: is located 0 bytes to the right of 16-byte region + case 'B': res = __sanitizer_unaligned_load32(x + 14); break; +// CHECK-B: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + case 'C': res = __sanitizer_unaligned_load32(x + 13); break; +// CHECK-C: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + case 'D': res = __sanitizer_unaligned_load64(x + 15); break; +// CHECK-D: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + case 'E': res = __sanitizer_unaligned_load64(x + 9); break; +// CHECK-E: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + + case 'K': __sanitizer_unaligned_store16(x + 15, 0); break; +// CHECK-K ERROR: AddressSanitizer: heap-buffer-overflow on address +// CHECK-K: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-2]] +// CHECK-K: is located 0 bytes to the right of 16-byte region + case 'L': __sanitizer_unaligned_store32(x + 15, 0); break; +// CHECK-L: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + case 'M': __sanitizer_unaligned_store32(x + 13, 0); break; +// CHECK-M: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + case 'N': __sanitizer_unaligned_store64(x + 10, 0); break; +// CHECK-N: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + case 'O': __sanitizer_unaligned_store64(x + 14, 0); break; +// CHECK-O: main{{.*}}unaligned_loads_and_stores.cc:[[@LINE-1]] + } + delete x; + return res; +} diff --git 
a/lib/asan/lit_tests/use-after-free-right.cc b/lib/asan/lit_tests/use-after-free-right.cc new file mode 100644 index 000000000000..b0de07b04a08 --- /dev/null +++ b/lib/asan/lit_tests/use-after-free-right.cc @@ -0,0 +1,46 @@ +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out +// RUN: %clangxx_asan -m64 -O1 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out +// RUN: %clangxx_asan -m64 -O2 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out +// RUN: %clangxx_asan -m64 -O3 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out +// RUN: %clangxx_asan -m32 -O0 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out +// RUN: %clangxx_asan -m32 -O1 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out +// RUN: %clangxx_asan -m32 -O2 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out +// RUN: %clangxx_asan -m32 -O3 %s -o %t && %t 2>&1 | %symbolize > %t.out +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-%os < %t.out + +// Test use-after-free report in the case when access is at the right border of +// the allocation. + +#include <stdlib.h> +int main() { + volatile char *x = (char*)malloc(sizeof(char)); + free((void*)x); + *x = 42; + // CHECK: {{.*ERROR: AddressSanitizer: heap-use-after-free on address}} + // CHECK: {{0x.* at pc 0x.* bp 0x.* sp 0x.*}} + // CHECK: {{WRITE of size 1 at 0x.* thread T0}} + // CHECK: {{ #0 0x.* in _?main .*use-after-free-right.cc:25}} + // CHECK: {{0x.* is located 0 bytes inside of 1-byte region .0x.*,0x.*}} + // CHECK: {{freed by thread T0 here:}} + + // CHECK-Linux: {{ #0 0x.* in .*free}} + // CHECK-Linux: {{ #1 0x.* in main .*use-after-free-right.cc:24}} + + // CHECK-Darwin: {{ #0 0x.* in _?wrap_free}} + // CHECK-Darwin: {{ #1 0x.* in _?main .*use-after-free-right.cc:24}} + + // CHECK: {{previously allocated by thread T0 here:}} + + // CHECK-Linux: {{ #0 0x.* in .*malloc}} + // CHECK-Linux: {{ #1 0x.* in main .*use-after-free-right.cc:23}} + + // CHECK-Darwin: {{ #0 0x.* in _?wrap_malloc.*}} + // CHECK-Darwin: {{ #1 0x.* in _?main .*use-after-free-right.cc:23}} +} diff --git a/lib/asan/lit_tests/use-after-free.cc b/lib/asan/lit_tests/use-after-free.cc index 24d5a2a54807..aee185dc4518 100644 --- a/lib/asan/lit_tests/use-after-free.cc +++ b/lib/asan/lit_tests/use-after-free.cc @@ -30,19 +30,14 @@ int main() { // CHECK-Linux: {{ #0 0x.* in .*free}} // CHECK-Linux: {{ #1 0x.* in main .*use-after-free.cc:21}} - // CHECK-Darwin: {{ #0 0x.* in .*free_common.*}} - // CHECK-Darwin: {{ #1 0x.* in .*mz_free.*}} - // We override free() on Darwin, thus no malloc_zone_free - // CHECK-Darwin: {{ #2 0x.* in _?wrap_free}} - // CHECK-Darwin: {{ #3 0x.* in _?main .*use-after-free.cc:21}} + // CHECK-Darwin: {{ #0 0x.* in _?wrap_free}} + // CHECK-Darwin: {{ #1 0x.* in _?main .*use-after-free.cc:21}} // CHECK: {{previously allocated by thread T0 here:}} // CHECK-Linux: {{ #0 0x.* in .*malloc}} // CHECK-Linux: {{ #1 0x.* in main .*use-after-free.cc:20}} - // CHECK-Darwin: {{ #0 0x.* in .*mz_malloc.*}} - // CHECK-Darwin: {{ #1 0x.* in malloc_zone_malloc.*}} - 
// CHECK-Darwin: {{ #2 0x.* in malloc.*}} - // CHECK-Darwin: {{ #3 0x.* in _?main .*use-after-free.cc:20}} + // CHECK-Darwin: {{ #0 0x.* in _?wrap_malloc.*}} + // CHECK-Darwin: {{ #1 0x.* in _?main .*use-after-free.cc:20}} } diff --git a/lib/asan/lit_tests/use-after-poison.cc b/lib/asan/lit_tests/use-after-poison.cc new file mode 100644 index 000000000000..d87342900245 --- /dev/null +++ b/lib/asan/lit_tests/use-after-poison.cc @@ -0,0 +1,20 @@ +// Check that __asan_poison_memory_region works. +// RUN: %clangxx_asan -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// +// Check that we can disable it +// RUN: ASAN_OPTIONS=allow_user_poisoning=0 %t + +#include <stdlib.h> + +extern "C" void __asan_poison_memory_region(void *, size_t); + +int main(int argc, char **argv) { + char *x = new char[16]; + x[10] = 0; + __asan_poison_memory_region(x, 16); + int res = x[argc * 10]; // BOOOM + // CHECK: ERROR: AddressSanitizer: use-after-poison + // CHECK: main{{.*}}use-after-poison.cc:[[@LINE-2]] + delete [] x; + return res; +} diff --git a/lib/asan/lit_tests/use-after-scope-inlined.cc b/lib/asan/lit_tests/use-after-scope-inlined.cc index 3d730de6ab35..5c121ea187eb 100644 --- a/lib/asan/lit_tests/use-after-scope-inlined.cc +++ b/lib/asan/lit_tests/use-after-scope-inlined.cc @@ -23,7 +23,8 @@ int main(int argc, char *argv[]) { // CHECK: READ of size 4 at 0x{{.*}} thread T0 // CHECK: #0 0x{{.*}} in {{_?}}main // CHECK: {{.*}}use-after-scope-inlined.cc:[[@LINE-4]] - // CHECK: Address 0x{{.*}} is located at offset - // CHECK: [[OFFSET:[^ ]*]] in frame <main> of T0{{.*}}: + // CHECK: Address 0x{{.*}} is located in stack of thread T0 at offset + // CHECK: [[OFFSET:[^ ]*]] in frame + // CHECK: main // CHECK: {{\[}}[[OFFSET]], {{.*}}) 'x.i' } diff --git a/lib/asan/lit_tests/wait.cc b/lib/asan/lit_tests/wait.cc new file mode 100644 index 000000000000..88fbb17176fa --- /dev/null +++ b/lib/asan/lit_tests/wait.cc @@ -0,0 +1,77 @@ +// RUN: %clangxx_asan -DWAIT -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// RUN: %clangxx_asan -DWAITPID -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAITPID -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAITPID -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAITPID -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// RUN: %clangxx_asan -DWAITID -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAITID -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAITID -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAITID -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// RUN: %clangxx_asan -DWAIT3 -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT3 -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT3 -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT3 -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// RUN: %clangxx_asan -DWAIT4 -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT4 -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// 
RUN: %clangxx_asan -DWAIT4 -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT4 -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// RUN: %clangxx_asan -DWAIT3_RUSAGE -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT3_RUSAGE -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT3_RUSAGE -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT3_RUSAGE -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + +// RUN: %clangxx_asan -DWAIT4_RUSAGE -m64 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT4_RUSAGE -m64 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT4_RUSAGE -m32 -O0 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s +// RUN: %clangxx_asan -DWAIT4_RUSAGE -m32 -O3 %s -o %t && %t 2>&1 | %symbolize | FileCheck %s + + +#include <assert.h> +#include <sys/wait.h> +#include <unistd.h> + +int main(int argc, char **argv) { + pid_t pid = fork(); + if (pid) { // parent + int x[3]; + int *status = x + argc * 3; + int res; +#if defined(WAIT) + res = wait(status); +#elif defined(WAITPID) + res = waitpid(pid, status, WNOHANG); +#elif defined(WAITID) + siginfo_t *si = (siginfo_t*)(x + argc * 3); + res = waitid(P_ALL, 0, si, WEXITED | WNOHANG); +#elif defined(WAIT3) + res = wait3(status, WNOHANG, NULL); +#elif defined(WAIT4) + res = wait4(pid, status, WNOHANG, NULL); +#elif defined(WAIT3_RUSAGE) || defined(WAIT4_RUSAGE) + struct rusage *ru = (struct rusage*)(x + argc * 3); + int good_status; +# if defined(WAIT3_RUSAGE) + res = wait3(&good_status, WNOHANG, ru); +# elif defined(WAIT4_RUSAGE) + res = wait4(pid, &good_status, WNOHANG, ru); +# endif +#endif + // CHECK: stack-buffer-overflow + // CHECK: {{WRITE of size .* at 0x.* thread T0}} + // CHECK: {{in .*wait}} + // CHECK: {{in _?main .*wait.cc:}} + // CHECK: is located in stack of thread T0 at offset + // CHECK: {{in _?main}} + return res != -1; + } + // child + return 0; +} diff --git a/lib/asan/scripts/asan_symbolize.py b/lib/asan/scripts/asan_symbolize.py index 7b30bb55914e..bd3bf1e9b53e 100755 --- a/lib/asan/scripts/asan_symbolize.py +++ b/lib/asan/scripts/asan_symbolize.py @@ -8,6 +8,7 @@ # #===------------------------------------------------------------------------===# import bisect +import getopt import os import re import subprocess @@ -18,6 +19,7 @@ symbolizers = {} filetypes = {} vmaddrs = {} DEBUG = False +demangle = False; # FIXME: merge the code that calls fix_filename(). 
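# Editorial note, not part of the commit: the asan_symbolize.py hunks below add
# a demangling switch. The getopt call appended at the bottom of the script
# accepts -d or --demangle; when given, the previously hard-coded
# '--demangle=false' passed to llvm-symbolizer becomes '--demangle=%s' % demangle,
# and '--demangle' is appended to the addr2line command line. Demangling stays
# off by default. A typical invocation would then look something like
# (illustrative only, the exact pipeline depends on how the script is deployed):
#   ./a.out 2>&1 | asan_symbolize.py --demangle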
@@ -60,7 +62,7 @@ class LLVMSymbolizer(Symbolizer): return None cmd = [self.symbolizer_path, '--use-symbol-table=true', - '--demangle=false', + '--demangle=%s' % demangle, '--functions=true', '--inlining=true'] if DEBUG: @@ -111,7 +113,10 @@ class Addr2LineSymbolizer(Symbolizer): self.pipe = self.open_addr2line() def open_addr2line(self): - cmd = ['addr2line', '-f', '-e', self.binary] + cmd = ['addr2line', '-f'] + if demangle: + cmd += ['--demangle'] + cmd += ['-e', self.binary] if DEBUG: print ' '.join(cmd) return subprocess.Popen(cmd, @@ -352,5 +357,9 @@ class SymbolizationLoop(object): if __name__ == '__main__': + opts, args = getopt.getopt(sys.argv[1:], "d", ["demangle"]) + for o, a in opts: + if o in ("-d", "--demangle"): + demangle = True; loop = SymbolizationLoop() loop.process_stdin() diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt index 272950bc5450..80d6f5d67aad 100644 --- a/lib/asan/tests/CMakeLists.txt +++ b/lib/asan/tests/CMakeLists.txt @@ -15,6 +15,13 @@ include(CompilerRTCompile) include_directories(..) include_directories(../..) +# Use zero-based shadow on Android. +if(ANDROID) + set(ASAN_TESTS_USE_ZERO_BASE_SHADOW TRUE) +else() + set(ASAN_TESTS_USE_ZERO_BASE_SHADOW FALSE) +endif() + set(ASAN_UNITTEST_HEADERS asan_mac_test.h asan_test_config.h @@ -25,6 +32,7 @@ set(ASAN_UNITTEST_COMMON_CFLAGS -I${COMPILER_RT_SOURCE_DIR}/include -I${COMPILER_RT_SOURCE_DIR}/lib -I${COMPILER_RT_SOURCE_DIR}/lib/asan + -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common/tests -Wall -Wno-format -Werror @@ -32,39 +40,37 @@ set(ASAN_UNITTEST_COMMON_CFLAGS -O2 ) +if(ASAN_TESTS_USE_ZERO_BASE_SHADOW) + list(APPEND ASAN_UNITTEST_COMMON_CFLAGS -fPIE) +endif() if(SUPPORTS_NO_VARIADIC_MACROS_FLAG) list(APPEND ASAN_UNITTEST_COMMON_CFLAGS -Wno-variadic-macros) endif() # Use -D instead of definitions to please custom compile command. +list(APPEND ASAN_UNITTEST_COMMON_CFLAGS + -DASAN_HAS_BLACKLIST=1 + -DASAN_HAS_EXCEPTIONS=1 + -DASAN_UAR=0) if(ANDROID) list(APPEND ASAN_UNITTEST_COMMON_CFLAGS - -DASAN_LOW_MEMORY=1 - -DASAN_HAS_BLACKLIST=1 - -DASAN_HAS_EXCEPTIONS=1 - -DASAN_NEEDS_SEGV=0 - -DASAN_UAR=0 - -fPIE - ) + -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 + -DASAN_NEEDS_SEGV=0) else() list(APPEND ASAN_UNITTEST_COMMON_CFLAGS - -DASAN_HAS_BLACKLIST=1 - -DASAN_HAS_EXCEPTIONS=1 - -DASAN_NEEDS_SEGV=1 - -DASAN_UAR=0 - ) + -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=1 + -DASAN_NEEDS_SEGV=1) endif() set(ASAN_LINK_FLAGS) -if(ANDROID) - # On Android, we link with ASan runtime manually +if(ASAN_TESTS_USE_ZERO_BASE_SHADOW) list(APPEND ASAN_LINK_FLAGS -pie) -else() - # On other platforms, we depend on Clang driver behavior, - # passing -fsanitize=address flag. +endif() +# On Android, we link with ASan runtime manually. On other platforms we depend +# on Clang driver behavior, passing -fsanitize=address flag. +if(NOT ANDROID) list(APPEND ASAN_LINK_FLAGS -fsanitize=address) endif() - # Unit tests on Mac depend on Foundation. 
if(APPLE) list(APPEND ASAN_LINK_FLAGS -framework Foundation) @@ -77,13 +83,17 @@ set(ASAN_BLACKLIST_FILE "${CMAKE_CURRENT_SOURCE_DIR}/asan_test.ignore") set(ASAN_UNITTEST_INSTRUMENTED_CFLAGS ${ASAN_UNITTEST_COMMON_CFLAGS} -fsanitize=address - -mllvm "-asan-blacklist=${ASAN_BLACKLIST_FILE}" + "-fsanitize-blacklist=${ASAN_BLACKLIST_FILE}" -mllvm -asan-stack=1 -mllvm -asan-globals=1 -mllvm -asan-mapping-scale=0 # default will be used -mllvm -asan-mapping-offset-log=-1 # default will be used -mllvm -asan-use-after-return=0 ) +if(ASAN_TESTS_USE_ZERO_BASE_SHADOW) + list(APPEND ASAN_UNITTEST_INSTRUMENTED_CFLAGS + -fsanitize-address-zero-base-shadow) +endif() # Compile source for the given architecture, using compiler # options in ${ARGN}, and add it to the object list. @@ -117,6 +127,16 @@ set_target_properties(AsanUnitTests PROPERTIES FOLDER "ASan unit tests") add_custom_target(AsanBenchmarks) set_target_properties(AsanBenchmarks PROPERTIES FOLDER "Asan benchmarks") +set(ASAN_NOINST_TEST_SOURCES + asan_noinst_test.cc + asan_test_main.cc) +set(ASAN_INST_TEST_SOURCES + asan_globals_test.cc + asan_test.cc + asan_oob_test.cc + asan_mem_test.cc + asan_str_test.cc) + # Adds ASan unit tests and benchmarks for architecture. macro(add_asan_tests_for_arch arch) # Build gtest instrumented with ASan. @@ -125,20 +145,23 @@ macro(add_asan_tests_for_arch arch) ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS}) # Instrumented tests. set(ASAN_INST_TEST_OBJECTS) - asan_compile(ASAN_INST_TEST_OBJECTS asan_globals_test.cc ${arch} - ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS}) - asan_compile(ASAN_INST_TEST_OBJECTS asan_test.cc ${arch} - ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS}) + foreach(src ${ASAN_INST_TEST_SOURCES}) + asan_compile(ASAN_INST_TEST_OBJECTS ${src} ${arch} + ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS}) + endforeach() + # Add Mac-specific tests. if (APPLE) - asan_compile(ASAN_INST_TEST_OBJECTS asan_mac_test.mm ${arch} + asan_compile(ASAN_INST_TEST_OBJECTS asan_mac_test.cc ${arch} + ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS}) + asan_compile(ASAN_INST_TEST_OBJECTS asan_mac_test_helpers.mm ${arch} ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -ObjC) endif() # Uninstrumented tests. set(ASAN_NOINST_TEST_OBJECTS) - asan_compile(ASAN_NOINST_TEST_OBJECTS asan_noinst_test.cc ${arch} - ${ASAN_UNITTEST_COMMON_CFLAGS}) - asan_compile(ASAN_NOINST_TEST_OBJECTS asan_test_main.cc ${arch} - ${ASAN_UNITTEST_COMMON_CFLAGS}) + foreach(src ${ASAN_NOINST_TEST_SOURCES}) + asan_compile(ASAN_NOINST_TEST_OBJECTS ${src} ${arch} + ${ASAN_UNITTEST_COMMON_CFLAGS}) + endforeach() # Link everything together. add_asan_test(AsanUnitTests "Asan-${arch}-Test" ${arch} ${ASAN_NOINST_TEST_OBJECTS} @@ -154,19 +177,14 @@ macro(add_asan_tests_for_arch arch) endmacro() if(COMPILER_RT_CAN_EXECUTE_TESTS) - if(CAN_TARGET_x86_64) - add_asan_tests_for_arch(x86_64) - endif() - if(CAN_TARGET_i386) - add_asan_tests_for_arch(i386) - endif() + foreach(arch ${ASAN_SUPPORTED_ARCH}) + add_asan_tests_for_arch(${arch}) + endforeach() endif() if(ANDROID) # We assume that unit tests on Android are built in a build # tree with fresh Clang as a host compiler. - set(ASAN_NOINST_TEST_SOURCES asan_noinst_test.cc asan_test_main.cc) - set(ASAN_INST_TEST_SOURCES asan_globals_test.cc asan_test.cc) add_library(asan_noinst_test OBJECT ${ASAN_NOINST_TEST_SOURCES}) set_target_compile_flags(asan_noinst_test ${ASAN_UNITTEST_COMMON_CFLAGS}) add_library(asan_inst_test OBJECT @@ -177,9 +195,8 @@ if(ANDROID) $<TARGET_OBJECTS:asan_inst_test> ) # Setup correct output directory and link flags. 
- get_unittest_directory(OUTPUT_DIR) set_target_properties(AsanTest PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_DIR}) + RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) set_target_link_flags(AsanTest ${ASAN_LINK_FLAGS}) target_link_libraries(AsanTest clang_rt.asan-arm-android) # Add unit test to test suite. diff --git a/lib/asan/tests/asan_globals_test.cc b/lib/asan/tests/asan_globals_test.cc index dc2e9bbb0530..5042ef07378d 100644 --- a/lib/asan/tests/asan_globals_test.cc +++ b/lib/asan/tests/asan_globals_test.cc @@ -11,8 +11,29 @@ // // Some globals in a separate file. //===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +char glob1[1]; +char glob2[2]; +char glob3[3]; +char glob4[4]; +char glob5[5]; +char glob6[6]; +char glob7[7]; +char glob8[8]; +char glob9[9]; +char glob10[10]; +char glob11[11]; +char glob12[12]; +char glob13[13]; +char glob14[14]; +char glob15[15]; +char glob16[16]; +char glob17[17]; +char glob1000[1000]; +char glob10000[10000]; +char glob100000[100000]; -extern char glob5[5]; static char static10[10]; int GlobalsTest(int zero) { diff --git a/lib/asan/tests/asan_mac_test.cc b/lib/asan/tests/asan_mac_test.cc new file mode 100644 index 000000000000..cabdfd711ea2 --- /dev/null +++ b/lib/asan/tests/asan_mac_test.cc @@ -0,0 +1,236 @@ +//===-- asan_test_mac.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +//===----------------------------------------------------------------------===// + +#include "asan_test_utils.h" + +#include "asan_mac_test.h" + +#include <malloc/malloc.h> +#include <AvailabilityMacros.h> // For MAC_OS_X_VERSION_* +#include <CoreFoundation/CFString.h> + +TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree) { + EXPECT_DEATH( + CFAllocatorDefaultDoubleFree(NULL), + "attempting double-free"); +} + +void CFAllocator_DoubleFreeOnPthread() { + pthread_t child; + PTHREAD_CREATE(&child, NULL, CFAllocatorDefaultDoubleFree, NULL); + PTHREAD_JOIN(child, NULL); // Shouldn't be reached. +} + +TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree_ChildPhread) { + EXPECT_DEATH(CFAllocator_DoubleFreeOnPthread(), "attempting double-free"); +} + +namespace { + +void *GLOB; + +void *CFAllocatorAllocateToGlob(void *unused) { + GLOB = CFAllocatorAllocate(NULL, 100, /*hint*/0); + return NULL; +} + +void *CFAllocatorDeallocateFromGlob(void *unused) { + char *p = (char*)GLOB; + p[100] = 'A'; // ASan should report an error here. + CFAllocatorDeallocate(NULL, GLOB); + return NULL; +} + +void CFAllocator_PassMemoryToAnotherThread() { + pthread_t th1, th2; + PTHREAD_CREATE(&th1, NULL, CFAllocatorAllocateToGlob, NULL); + PTHREAD_JOIN(th1, NULL); + PTHREAD_CREATE(&th2, NULL, CFAllocatorDeallocateFromGlob, NULL); + PTHREAD_JOIN(th2, NULL); +} + +TEST(AddressSanitizerMac, CFAllocator_PassMemoryToAnotherThread) { + EXPECT_DEATH(CFAllocator_PassMemoryToAnotherThread(), + "heap-buffer-overflow"); +} + +} // namespace + +// TODO(glider): figure out whether we still need these tests. Is it correct +// to intercept the non-default CFAllocators? 
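// Editorial note, not part of the commit: the Mac tests in this file share one
// pattern: provoke the bug inside a helper and match a fragment of the ASan
// report with gtest's EXPECT_DEATH. Below is a minimal sketch of that pattern,
// assuming only gtest plus an ASan-instrumented build; DoubleFreeHelper is a
// hypothetical example, not a helper defined in this file.
#include <stdlib.h>
#include "gtest/gtest.h"

static void DoubleFreeHelper() {
  char *p = (char *)malloc(16);
  free(p);
  free(p);  // the second free triggers an "attempting double-free" report
}

TEST(ExampleDeathTest, DoubleFreeIsReported) {
  EXPECT_DEATH(DoubleFreeHelper(), "attempting double-free");
}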
+TEST(AddressSanitizerMac, DISABLED_CFAllocatorSystemDefaultDoubleFree) { + EXPECT_DEATH( + CFAllocatorSystemDefaultDoubleFree(), + "attempting double-free"); +} + +// We're intercepting malloc, so kCFAllocatorMalloc is routed to ASan. +TEST(AddressSanitizerMac, CFAllocatorMallocDoubleFree) { + EXPECT_DEATH(CFAllocatorMallocDoubleFree(), "attempting double-free"); +} + +TEST(AddressSanitizerMac, DISABLED_CFAllocatorMallocZoneDoubleFree) { + EXPECT_DEATH(CFAllocatorMallocZoneDoubleFree(), "attempting double-free"); +} + +// For libdispatch tests below we check that ASan got to the shadow byte +// legend, i.e. managed to print the thread stacks (this almost certainly +// means that the libdispatch task creation has been intercepted correctly). +TEST(AddressSanitizerMac, GCDDispatchAsync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDDispatchAsync(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDDispatchSync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDDispatchSync(), "Shadow byte legend"); +} + + +TEST(AddressSanitizerMac, GCDReuseWqthreadsAsync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDReuseWqthreadsAsync(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDReuseWqthreadsSync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDReuseWqthreadsSync(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDDispatchAfter) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDDispatchAfter(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDSourceEvent) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDSourceEvent(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDSourceCancel) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDSourceCancel(), "Shadow byte legend"); +} + +TEST(AddressSanitizerMac, GCDGroupAsync) { + // Make sure the whole ASan report is printed, i.e. that we don't die + // on a CHECK. + EXPECT_DEATH(TestGCDGroupAsync(), "Shadow byte legend"); +} + +void *MallocIntrospectionLockWorker(void *_) { + const int kNumPointers = 100; + int i; + void *pointers[kNumPointers]; + for (i = 0; i < kNumPointers; i++) { + pointers[i] = malloc(i + 1); + } + for (i = 0; i < kNumPointers; i++) { + free(pointers[i]); + } + + return NULL; +} + +void *MallocIntrospectionLockForker(void *_) { + pid_t result = fork(); + if (result == -1) { + perror("fork"); + } + assert(result != -1); + if (result == 0) { + // Call malloc in the child process to make sure we won't deadlock. + void *ptr = malloc(42); + free(ptr); + exit(0); + } else { + // Return in the parent process. + return NULL; + } +} + +TEST(AddressSanitizerMac, MallocIntrospectionLock) { + // Incorrect implementation of force_lock and force_unlock in our malloc zone + // will cause forked processes to deadlock. + // TODO(glider): need to detect that none of the child processes deadlocked. 
+ const int kNumWorkers = 5, kNumIterations = 100; + int i, iter; + for (iter = 0; iter < kNumIterations; iter++) { + pthread_t workers[kNumWorkers], forker; + for (i = 0; i < kNumWorkers; i++) { + PTHREAD_CREATE(&workers[i], 0, MallocIntrospectionLockWorker, 0); + } + PTHREAD_CREATE(&forker, 0, MallocIntrospectionLockForker, 0); + for (i = 0; i < kNumWorkers; i++) { + PTHREAD_JOIN(workers[i], 0); + } + PTHREAD_JOIN(forker, 0); + } +} + +void *TSDAllocWorker(void *test_key) { + if (test_key) { + void *mem = malloc(10); + pthread_setspecific(*(pthread_key_t*)test_key, mem); + } + return NULL; +} + +TEST(AddressSanitizerMac, DISABLED_TSDWorkqueueTest) { + pthread_t th; + pthread_key_t test_key; + pthread_key_create(&test_key, CallFreeOnWorkqueue); + PTHREAD_CREATE(&th, NULL, TSDAllocWorker, &test_key); + PTHREAD_JOIN(th, NULL); + pthread_key_delete(test_key); +} + +// Test that CFStringCreateCopy does not copy constant strings. +TEST(AddressSanitizerMac, CFStringCreateCopy) { + CFStringRef str = CFSTR("Hello world!\n"); + CFStringRef str2 = CFStringCreateCopy(0, str); + EXPECT_EQ(str, str2); +} + +TEST(AddressSanitizerMac, NSObjectOOB) { + // Make sure that our allocators are used for NSObjects. + EXPECT_DEATH(TestOOBNSObjects(), "heap-buffer-overflow"); +} + +// Make sure that correct pointer is passed to free() when deallocating a +// NSURL object. +// See http://code.google.com/p/address-sanitizer/issues/detail?id=70. +TEST(AddressSanitizerMac, NSURLDeallocation) { + TestNSURLDeallocation(); +} + +// See http://code.google.com/p/address-sanitizer/issues/detail?id=109. +TEST(AddressSanitizerMac, Mstats) { + malloc_statistics_t stats1, stats2; + malloc_zone_statistics(/*all zones*/NULL, &stats1); + const size_t kMallocSize = 100000; + void *alloc = Ident(malloc(kMallocSize)); + malloc_zone_statistics(/*all zones*/NULL, &stats2); + EXPECT_GT(stats2.blocks_in_use, stats1.blocks_in_use); + EXPECT_GE(stats2.size_in_use - stats1.size_in_use, kMallocSize); + free(alloc); + // Even the default OSX allocator may not change the stats after free(). +} + diff --git a/lib/asan/tests/asan_mac_test.mm b/lib/asan/tests/asan_mac_test_helpers.mm index 4cbd2bb247fd..4cbd2bb247fd 100644 --- a/lib/asan/tests/asan_mac_test.mm +++ b/lib/asan/tests/asan_mac_test_helpers.mm diff --git a/lib/asan/tests/asan_mem_test.cc b/lib/asan/tests/asan_mem_test.cc new file mode 100644 index 000000000000..60f5cd4cf760 --- /dev/null +++ b/lib/asan/tests/asan_mem_test.cc @@ -0,0 +1,240 @@ +//===-- asan_mem_test.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +template<typename T> +void MemSetOOBTestTemplate(size_t length) { + if (length == 0) return; + size_t size = Ident(sizeof(T) * length); + T *array = Ident((T*)malloc(size)); + int element = Ident(42); + int zero = Ident(0); + void *(*MEMSET)(void *s, int c, size_t n) = Ident(memset); + // memset interval inside array + MEMSET(array, element, size); + MEMSET(array, element, size - 1); + MEMSET(array + length - 1, element, sizeof(T)); + MEMSET(array, element, 1); + + // memset 0 bytes + MEMSET(array - 10, element, zero); + MEMSET(array - 1, element, zero); + MEMSET(array, element, zero); + MEMSET(array + length, 0, zero); + MEMSET(array + length + 1, 0, zero); + + // try to memset bytes to the right of array + EXPECT_DEATH(MEMSET(array, 0, size + 1), + RightOOBWriteMessage(0)); + EXPECT_DEATH(MEMSET((char*)(array + length) - 1, element, 6), + RightOOBWriteMessage(0)); + EXPECT_DEATH(MEMSET(array + 1, element, size + sizeof(T)), + RightOOBWriteMessage(0)); + // whole interval is to the right + EXPECT_DEATH(MEMSET(array + length + 1, 0, 10), + RightOOBWriteMessage(sizeof(T))); + + // try to memset bytes to the left of array + EXPECT_DEATH(MEMSET((char*)array - 1, element, size), + LeftOOBWriteMessage(1)); + EXPECT_DEATH(MEMSET((char*)array - 5, 0, 6), + LeftOOBWriteMessage(5)); + if (length >= 100) { + // Large OOB, we find it only if the redzone is large enough. + EXPECT_DEATH(memset(array - 5, element, size + 5 * sizeof(T)), + LeftOOBWriteMessage(5 * sizeof(T))); + } + // whole interval is to the left + EXPECT_DEATH(MEMSET(array - 2, 0, sizeof(T)), + LeftOOBWriteMessage(2 * sizeof(T))); + + // try to memset bytes both to the left & to the right + EXPECT_DEATH(MEMSET((char*)array - 2, element, size + 4), + LeftOOBWriteMessage(2)); + + free(array); +} + +TEST(AddressSanitizer, MemSetOOBTest) { + MemSetOOBTestTemplate<char>(100); + MemSetOOBTestTemplate<int>(5); + MemSetOOBTestTemplate<double>(256); + // We can test arrays of structres/classes here, but what for? +} + +// Try to allocate two arrays of 'size' bytes that are near each other. +// Strictly speaking we are not guaranteed to find such two pointers, +// but given the structure of asan's allocator we will. +static bool AllocateTwoAdjacentArrays(char **x1, char **x2, size_t size) { + vector<char *> v; + bool res = false; + for (size_t i = 0; i < 1000U && !res; i++) { + v.push_back(new char[size]); + if (i == 0) continue; + sort(v.begin(), v.end()); + for (size_t j = 1; j < v.size(); j++) { + assert(v[j] > v[j-1]); + if ((size_t)(v[j] - v[j-1]) < size * 2) { + *x2 = v[j]; + *x1 = v[j-1]; + res = true; + break; + } + } + } + + for (size_t i = 0; i < v.size(); i++) { + if (res && v[i] == *x1) continue; + if (res && v[i] == *x2) continue; + delete [] v[i]; + } + return res; +} + +TEST(AddressSanitizer, LargeOOBInMemset) { + for (size_t size = 200; size < 100000; size += size / 2) { + char *x1, *x2; + if (!Ident(AllocateTwoAdjacentArrays)(&x1, &x2, size)) + continue; + // fprintf(stderr, " large oob memset: %p %p %zd\n", x1, x2, size); + // Do a memset on x1 with huge out-of-bound access that will end up in x2. 
+ EXPECT_DEATH(Ident(memset)(x1, 0, size * 2), + "is located 0 bytes to the right"); + delete [] x1; + delete [] x2; + return; + } + assert(0 && "Did not find two adjacent malloc-ed pointers"); +} + +// Same test for memcpy and memmove functions +template <typename T, class M> +void MemTransferOOBTestTemplate(size_t length) { + if (length == 0) return; + size_t size = Ident(sizeof(T) * length); + T *src = Ident((T*)malloc(size)); + T *dest = Ident((T*)malloc(size)); + int zero = Ident(0); + + // valid transfer of bytes between arrays + M::transfer(dest, src, size); + M::transfer(dest + 1, src, size - sizeof(T)); + M::transfer(dest, src + length - 1, sizeof(T)); + M::transfer(dest, src, 1); + + // transfer zero bytes + M::transfer(dest - 1, src, 0); + M::transfer(dest + length, src, zero); + M::transfer(dest, src - 1, zero); + M::transfer(dest, src, zero); + + // try to change mem to the right of dest + EXPECT_DEATH(M::transfer(dest + 1, src, size), + RightOOBWriteMessage(0)); + EXPECT_DEATH(M::transfer((char*)(dest + length) - 1, src, 5), + RightOOBWriteMessage(0)); + + // try to change mem to the left of dest + EXPECT_DEATH(M::transfer(dest - 2, src, size), + LeftOOBWriteMessage(2 * sizeof(T))); + EXPECT_DEATH(M::transfer((char*)dest - 3, src, 4), + LeftOOBWriteMessage(3)); + + // try to access mem to the right of src + EXPECT_DEATH(M::transfer(dest, src + 2, size), + RightOOBReadMessage(0)); + EXPECT_DEATH(M::transfer(dest, (char*)(src + length) - 3, 6), + RightOOBReadMessage(0)); + + // try to access mem to the left of src + EXPECT_DEATH(M::transfer(dest, src - 1, size), + LeftOOBReadMessage(sizeof(T))); + EXPECT_DEATH(M::transfer(dest, (char*)src - 6, 7), + LeftOOBReadMessage(6)); + + // Generally we don't need to test cases where both accessing src and writing + // to dest address to poisoned memory. + + T *big_src = Ident((T*)malloc(size * 2)); + T *big_dest = Ident((T*)malloc(size * 2)); + // try to change mem to both sides of dest + EXPECT_DEATH(M::transfer(dest - 1, big_src, size * 2), + LeftOOBWriteMessage(sizeof(T))); + // try to access mem to both sides of src + EXPECT_DEATH(M::transfer(big_dest, src - 2, size * 2), + LeftOOBReadMessage(2 * sizeof(T))); + + free(src); + free(dest); + free(big_src); + free(big_dest); +} + +class MemCpyWrapper { + public: + static void* transfer(void *to, const void *from, size_t size) { + return Ident(memcpy)(to, from, size); + } +}; + +TEST(AddressSanitizer, MemCpyOOBTest) { + MemTransferOOBTestTemplate<char, MemCpyWrapper>(100); + MemTransferOOBTestTemplate<int, MemCpyWrapper>(1024); +} + +class MemMoveWrapper { + public: + static void* transfer(void *to, const void *from, size_t size) { + return Ident(memmove)(to, from, size); + } +}; + +TEST(AddressSanitizer, MemMoveOOBTest) { + MemTransferOOBTestTemplate<char, MemMoveWrapper>(100); + MemTransferOOBTestTemplate<int, MemMoveWrapper>(1024); +} + + +TEST(AddressSanitizer, MemCmpOOBTest) { + size_t size = Ident(100); + char *s1 = MallocAndMemsetString(size); + char *s2 = MallocAndMemsetString(size); + // Normal memcmp calls. + Ident(memcmp(s1, s2, size)); + Ident(memcmp(s1 + size - 1, s2 + size - 1, 1)); + Ident(memcmp(s1 - 1, s2 - 1, 0)); + // One of arguments points to not allocated memory. 
+ EXPECT_DEATH(Ident(memcmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(memcmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(memcmp)(s1 + size, s2, 1), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(memcmp)(s1, s2 + size, 1), RightOOBReadMessage(0)); + // Hit unallocated memory and die. + EXPECT_DEATH(Ident(memcmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(memcmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0)); + // Zero bytes are not terminators and don't prevent from OOB. + s1[size - 1] = '\0'; + s2[size - 1] = '\0'; + EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0)); + + // Even if the buffers differ in the first byte, we still assume that + // memcmp may access the whole buffer and thus reporting the overflow here: + s1[0] = 1; + s2[0] = 123; + EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0)); + + free(s1); + free(s2); +} + + + diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc index 576312bf319f..54fdd1979b8e 100644 --- a/lib/asan/tests/asan_noinst_test.cc +++ b/lib/asan/tests/asan_noinst_test.cc @@ -15,9 +15,7 @@ #include "asan_allocator.h" #include "asan_internal.h" #include "asan_mapping.h" -#include "asan_stack.h" #include "asan_test_utils.h" -#include "sanitizer/asan_interface.h" #include <assert.h> #include <stdio.h> @@ -25,6 +23,7 @@ #include <string.h> // for memset() #include <algorithm> #include <vector> +#include <limits> TEST(AddressSanitizer, InternalSimpleDeathTest) { @@ -33,17 +32,17 @@ TEST(AddressSanitizer, InternalSimpleDeathTest) { static void MallocStress(size_t n) { u32 seed = my_rand(); - __asan::StackTrace stack1; + StackTrace stack1; stack1.trace[0] = 0xa123; stack1.trace[1] = 0xa456; stack1.size = 2; - __asan::StackTrace stack2; + StackTrace stack2; stack2.trace[0] = 0xb123; stack2.trace[1] = 0xb456; stack2.size = 2; - __asan::StackTrace stack3; + StackTrace stack3; stack3.trace[0] = 0xc123; stack3.trace[1] = 0xc456; stack3.size = 2; @@ -79,11 +78,20 @@ static void MallocStress(size_t n) { TEST(AddressSanitizer, NoInstMallocTest) { -#ifdef __arm__ - MallocStress(300000); -#else - MallocStress(1000000); -#endif + MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000); +} + +TEST(AddressSanitizer, ThreadedMallocStressTest) { + const int kNumThreads = 4; + const int kNumIterations = (ASAN_LOW_MEMORY) ? 
10000 : 100000; + pthread_t t[kNumThreads]; + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress, + (void*)kNumIterations); + } + for (int i = 0; i < kNumThreads; i++) { + PTHREAD_JOIN(t[i], 0); + } } static void PrintShadow(const char *tag, uptr ptr, size_t size) { @@ -207,16 +215,16 @@ void CompressStackTraceTest(size_t n_iter) { for (size_t iter = 0; iter < n_iter; iter++) { std::random_shuffle(pc_array, pc_array + kNumPcs); - __asan::StackTrace stack0, stack1; + StackTrace stack0, stack1; stack0.CopyFrom(pc_array, kNumPcs); stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size)); size_t compress_size = std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs)); size_t n_frames = - __asan::StackTrace::CompressStack(&stack0, compressed, compress_size); + StackTrace::CompressStack(&stack0, compressed, compress_size); Ident(n_frames); assert(n_frames <= stack0.size); - __asan::StackTrace::UncompressStack(&stack1, compressed, compress_size); + StackTrace::UncompressStack(&stack1, compressed, compress_size); assert(stack1.size == n_frames); for (size_t i = 0; i < stack1.size; i++) { assert(stack0.trace[i] == stack1.trace[i]); @@ -233,13 +241,13 @@ void CompressStackTraceBenchmark(size_t n_iter) { u32 compressed[2 * kNumPcs]; std::random_shuffle(pc_array, pc_array + kNumPcs); - __asan::StackTrace stack0; + StackTrace stack0; stack0.CopyFrom(pc_array, kNumPcs); stack0.size = kNumPcs; for (size_t iter = 0; iter < n_iter; iter++) { size_t compress_size = kNumPcs; size_t n_frames = - __asan::StackTrace::CompressStack(&stack0, compressed, compress_size); + StackTrace::CompressStack(&stack0, compressed, compress_size); Ident(n_frames); } } @@ -249,11 +257,11 @@ TEST(AddressSanitizer, CompressStackTraceBenchmark) { } TEST(AddressSanitizer, QuarantineTest) { - __asan::StackTrace stack; + StackTrace stack; stack.trace[0] = 0x890; stack.size = 1; - const int size = 32; + const int size = 1024; void *p = __asan::asan_malloc(size, &stack); __asan::asan_free(p, &stack, __asan::FROM_MALLOC); size_t i; @@ -263,15 +271,14 @@ TEST(AddressSanitizer, QuarantineTest) { __asan::asan_free(p1, &stack, __asan::FROM_MALLOC); if (p1 == p) break; } - // fprintf(stderr, "i=%ld\n", i); - EXPECT_GE(i, 100000U); + EXPECT_GE(i, 10000U); EXPECT_LT(i, max_i); } void *ThreadedQuarantineTestWorker(void *unused) { (void)unused; u32 seed = my_rand(); - __asan::StackTrace stack; + StackTrace stack; stack.trace[0] = 0x890; stack.size = 1; @@ -298,7 +305,7 @@ TEST(AddressSanitizer, ThreadedQuarantineTest) { void *ThreadedOneSizeMallocStress(void *unused) { (void)unused; - __asan::StackTrace stack; + StackTrace stack; stack.trace[0] = 0x890; stack.size = 1; const size_t kNumMallocs = 1000; @@ -326,11 +333,13 @@ TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) { } TEST(AddressSanitizer, MemsetWildAddressTest) { + using __asan::kHighMemEnd; typedef void*(*memset_p)(void*, int, size_t); // Prevent inlining of memset(). volatile memset_p libc_memset = (memset_p)memset; EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100), - "unknown-crash.*low shadow"); + (kLowShadowEnd == 0) ? 
"unknown-crash.*shadow gap" + : "unknown-crash.*low shadow"); EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100), "unknown-crash.*shadow gap"); EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100), @@ -338,11 +347,7 @@ TEST(AddressSanitizer, MemsetWildAddressTest) { } TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) { -#if ASAN_ALLOCATOR_VERSION == 1 - EXPECT_EQ(1U, __asan_get_estimated_allocated_size(0)); -#elif ASAN_ALLOCATOR_VERSION == 2 EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0)); -#endif const size_t sizes[] = { 1, 30, 1<<30 }; for (size_t i = 0; i < 3; i++) { EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i])); @@ -416,44 +421,12 @@ static void DoDoubleFree() { delete Ident(x); } -#if ASAN_ALLOCATOR_VERSION == 1 -// This test is run in a separate process, so that large malloced -// chunk won't remain in the free lists after the test. -// Note: use ASSERT_* instead of EXPECT_* here. -static void RunGetHeapSizeTestAndDie() { - size_t old_heap_size, new_heap_size, heap_growth; - // We unlikely have have chunk of this size in free list. - static const size_t kLargeMallocSize = 1 << 29; // 512M - old_heap_size = __asan_get_heap_size(); - fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize); - free(Ident(malloc(kLargeMallocSize))); - new_heap_size = __asan_get_heap_size(); - heap_growth = new_heap_size - old_heap_size; - fprintf(stderr, "heap growth after first malloc: %zu\n", heap_growth); - ASSERT_GE(heap_growth, kLargeMallocSize); - ASSERT_LE(heap_growth, 2 * kLargeMallocSize); - - // Now large chunk should fall into free list, and can be - // allocated without increasing heap size. - old_heap_size = new_heap_size; - free(Ident(malloc(kLargeMallocSize))); - heap_growth = __asan_get_heap_size() - old_heap_size; - fprintf(stderr, "heap growth after second malloc: %zu\n", heap_growth); - ASSERT_LT(heap_growth, kLargeMallocSize); - - // Test passed. Now die with expected double-free. - DoDoubleFree(); -} - -TEST(AddressSanitizerInterface, GetHeapSizeTest) { - EXPECT_DEATH(RunGetHeapSizeTestAndDie(), "double-free"); -} -#elif ASAN_ALLOCATOR_VERSION == 2 TEST(AddressSanitizerInterface, GetHeapSizeTest) { // asan_allocator2 does not keep huge chunks in free list, but unmaps them. // The chunk should be greater than the quarantine size, // otherwise it will be stuck in quarantine instead of being unmaped. - static const size_t kLargeMallocSize = 1 << 29; // 512M + static const size_t kLargeMallocSize = (1 << 28) + 1; // 256M + free(Ident(malloc(kLargeMallocSize))); // Drain quarantine. uptr old_heap_size = __asan_get_heap_size(); for (int i = 0; i < 3; i++) { // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize); @@ -461,55 +434,6 @@ TEST(AddressSanitizerInterface, GetHeapSizeTest) { EXPECT_EQ(old_heap_size, __asan_get_heap_size()); } } -#endif - -// Note: use ASSERT_* instead of EXPECT_* here. -static void DoLargeMallocForGetFreeBytesTestAndDie() { -#if ASAN_ALLOCATOR_VERSION == 1 - // asan_allocator2 does not keep large chunks in free_lists, so this test - // will not work. - size_t old_free_bytes, new_free_bytes; - static const size_t kLargeMallocSize = 1 << 29; // 512M - // If we malloc and free a large memory chunk, it will not fall - // into quarantine and will be available for future requests. 
- old_free_bytes = __asan_get_free_bytes(); - fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize); - fprintf(stderr, "free bytes before malloc: %zu\n", old_free_bytes); - free(Ident(malloc(kLargeMallocSize))); - new_free_bytes = __asan_get_free_bytes(); - fprintf(stderr, "free bytes after malloc and free: %zu\n", new_free_bytes); - ASSERT_GE(new_free_bytes, old_free_bytes + kLargeMallocSize); -#endif // ASAN_ALLOCATOR_VERSION - // Test passed. - DoDoubleFree(); -} - -TEST(AddressSanitizerInterface, GetFreeBytesTest) { -#if ASAN_ALLOCATOR_VERSION == 1 - // Allocate a small chunk. Now allocator probably has a lot of these - // chunks to fulfill future requests. So, future requests will decrease - // the number of free bytes. Do this only on systems where there - // is enough memory for such assumptions. - if (SANITIZER_WORDSIZE == 64 && !ASAN_LOW_MEMORY) { - static const size_t kNumOfChunks = 100; - static const size_t kChunkSize = 100; - char *chunks[kNumOfChunks]; - size_t i; - size_t old_free_bytes, new_free_bytes; - chunks[0] = Ident((char*)malloc(kChunkSize)); - old_free_bytes = __asan_get_free_bytes(); - for (i = 1; i < kNumOfChunks; i++) { - chunks[i] = Ident((char*)malloc(kChunkSize)); - new_free_bytes = __asan_get_free_bytes(); - EXPECT_LT(new_free_bytes, old_free_bytes); - old_free_bytes = new_free_bytes; - } - for (i = 0; i < kNumOfChunks; i++) - free(chunks[i]); - } -#endif - EXPECT_DEATH(DoLargeMallocForGetFreeBytesTestAndDie(), "double-free"); -} static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357}; static const size_t kManyThreadsIterations = 250; @@ -631,6 +555,53 @@ TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) { free(vec); } +TEST(AddressSanitizerInterface, GlobalRedzones) { + GOOD_ACCESS(glob1, 1 - 1); + GOOD_ACCESS(glob2, 2 - 1); + GOOD_ACCESS(glob3, 3 - 1); + GOOD_ACCESS(glob4, 4 - 1); + GOOD_ACCESS(glob5, 5 - 1); + GOOD_ACCESS(glob6, 6 - 1); + GOOD_ACCESS(glob7, 7 - 1); + GOOD_ACCESS(glob8, 8 - 1); + GOOD_ACCESS(glob9, 9 - 1); + GOOD_ACCESS(glob10, 10 - 1); + GOOD_ACCESS(glob11, 11 - 1); + GOOD_ACCESS(glob12, 12 - 1); + GOOD_ACCESS(glob13, 13 - 1); + GOOD_ACCESS(glob14, 14 - 1); + GOOD_ACCESS(glob15, 15 - 1); + GOOD_ACCESS(glob16, 16 - 1); + GOOD_ACCESS(glob17, 17 - 1); + GOOD_ACCESS(glob1000, 1000 - 1); + GOOD_ACCESS(glob10000, 10000 - 1); + GOOD_ACCESS(glob100000, 100000 - 1); + + BAD_ACCESS(glob1, 1); + BAD_ACCESS(glob2, 2); + BAD_ACCESS(glob3, 3); + BAD_ACCESS(glob4, 4); + BAD_ACCESS(glob5, 5); + BAD_ACCESS(glob6, 6); + BAD_ACCESS(glob7, 7); + BAD_ACCESS(glob8, 8); + BAD_ACCESS(glob9, 9); + BAD_ACCESS(glob10, 10); + BAD_ACCESS(glob11, 11); + BAD_ACCESS(glob12, 12); + BAD_ACCESS(glob13, 13); + BAD_ACCESS(glob14, 14); + BAD_ACCESS(glob15, 15); + BAD_ACCESS(glob16, 16); + BAD_ACCESS(glob17, 17); + BAD_ACCESS(glob1000, 1000); + BAD_ACCESS(glob1000, 1100); // Redzone is at least 101 bytes. + BAD_ACCESS(glob10000, 10000); + BAD_ACCESS(glob10000, 11000); // Redzone is at least 1001 bytes. + BAD_ACCESS(glob100000, 100000); + BAD_ACCESS(glob100000, 110000); // Redzone is at least 10001 bytes. +} + // Make sure that each aligned block of size "2^granularity" doesn't have // "true" value before "false" value. static void MakeShadowValid(bool *shadow, int length, int granularity) { @@ -715,7 +686,7 @@ TEST(AddressSanitizerInterface, PoisonedRegion) { // 10.50% [.] __sanitizer::mem_is_zero // I.e. mem_is_zero should consume ~ SHADOW_GRANULARITY less CPU cycles // than memset itself. 
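// Editorial note, not part of the commit: GOOD_ACCESS and BAD_ACCESS, used by
// the GlobalRedzones test above, come from the unit-test harness and are not
// defined anywhere in this diff. A hedged sketch of what they are assumed to
// check, written against the public ASan interface rather than the harness
// internals (these are not the actual macro bodies):
#include <sanitizer/asan_interface.h>
#include "gtest/gtest.h"

// Assumed behavior: an in-bounds byte of a global is addressable, while a byte
// in the trailing redzone is poisoned; for a global of size N, index N - 1 is
// good and index N is bad, which is what the test above probes for each glob*.
#define GOOD_ACCESS(glob, index) \
  EXPECT_FALSE(__asan_address_is_poisoned((glob) + (index)))
#define BAD_ACCESS(glob, index) \
  EXPECT_TRUE(__asan_address_is_poisoned((glob) + (index)))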
-TEST(AddressSanitizerInterface, DISABLED_Stress_memset) { +TEST(AddressSanitizerInterface, DISABLED_StressLargeMemset) { size_t size = 1 << 20; char *x = new char[size]; for (int i = 0; i < 100000; i++) @@ -723,6 +694,15 @@ TEST(AddressSanitizerInterface, DISABLED_Stress_memset) { delete [] x; } +// Same here, but we run memset with small sizes. +TEST(AddressSanitizerInterface, DISABLED_StressSmallMemset) { + size_t size = 32; + char *x = new char[size]; + for (int i = 0; i < 100000000; i++) + Ident(memset)(x, 0, size); + delete [] x; +} + static const char *kInvalidPoisonMessage = "invalid-poison-memory-range"; static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range"; @@ -761,12 +741,7 @@ TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) { TEST(AddressSanitizerInterface, GetOwnershipStressTest) { std::vector<char *> pointers; std::vector<size_t> sizes; -#if ASAN_ALLOCATOR_VERSION == 1 - const size_t kNumMallocs = - (SANITIZER_WORDSIZE <= 32 || ASAN_LOW_MEMORY) ? 1 << 10 : 1 << 14; -#elif ASAN_ALLOCATOR_VERSION == 2 // too slow with asan_allocator2. :( const size_t kNumMallocs = 1 << 9; -#endif for (size_t i = 0; i < kNumMallocs; i++) { size_t size = i * 100 + 1; pointers.push_back((char*)malloc(size)); @@ -782,3 +757,38 @@ TEST(AddressSanitizerInterface, GetOwnershipStressTest) { for (size_t i = 0, n = pointers.size(); i < n; i++) free(pointers[i]); } + +TEST(AddressSanitizerInterface, CallocOverflow) { + size_t kArraySize = 4096; + volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max(); + volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10; + void *p = calloc(kArraySize, kArraySize2); // Should return 0. + EXPECT_EQ(0L, Ident(p)); +} + +TEST(AddressSanitizerInterface, CallocOverflow2) { +#if SANITIZER_WORDSIZE == 32 + size_t kArraySize = 112; + volatile size_t kArraySize2 = 43878406; + void *p = calloc(kArraySize, kArraySize2); // Should return 0. + EXPECT_EQ(0L, Ident(p)); +#endif +} + +TEST(AddressSanitizerInterface, CallocReturnsZeroMem) { + size_t sizes[] = {16, 1000, 10000, 100000, 2100000}; + for (size_t s = 0; s < ARRAY_SIZE(sizes); s++) { + size_t size = sizes[s]; + for (size_t iter = 0; iter < 5; iter++) { + char *x = Ident((char*)calloc(1, size)); + EXPECT_EQ(x[0], 0); + EXPECT_EQ(x[size - 1], 0); + EXPECT_EQ(x[size / 2], 0); + EXPECT_EQ(x[size / 3], 0); + EXPECT_EQ(x[size / 4], 0); + memset(x, 0x42, size); + free(Ident(x)); + free(Ident(malloc(Ident(1 << 27)))); // Try to drain the quarantine. + } + } +} diff --git a/lib/asan/tests/asan_oob_test.cc b/lib/asan/tests/asan_oob_test.cc new file mode 100644 index 000000000000..f8343f19cfcb --- /dev/null +++ b/lib/asan/tests/asan_oob_test.cc @@ -0,0 +1,126 @@ +//===-- asan_oob_test.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +NOINLINE void asan_write_sized_aligned(uint8_t *p, size_t size) { + EXPECT_EQ(0U, ((uintptr_t)p % size)); + if (size == 1) asan_write((uint8_t*)p); + else if (size == 2) asan_write((uint16_t*)p); + else if (size == 4) asan_write((uint32_t*)p); + else if (size == 8) asan_write((uint64_t*)p); +} + +template<typename T> +NOINLINE void oob_test(int size, int off) { + char *p = (char*)malloc_aaa(size); + // fprintf(stderr, "writing %d byte(s) into [%p,%p) with offset %d\n", + // sizeof(T), p, p + size, off); + asan_write((T*)(p + off)); + free_aaa(p); +} + +template<typename T> +void OOBTest() { + char expected_str[100]; + for (int size = sizeof(T); size < 20; size += 5) { + for (int i = -5; i < 0; i++) { + const char *str = + "is located.*%d byte.*to the left"; + sprintf(expected_str, str, abs(i)); + EXPECT_DEATH(oob_test<T>(size, i), expected_str); + } + + for (int i = 0; i < (int)(size - sizeof(T) + 1); i++) + oob_test<T>(size, i); + + for (int i = size - sizeof(T) + 1; i <= (int)(size + 2 * sizeof(T)); i++) { + const char *str = + "is located.*%d byte.*to the right"; + int off = i >= size ? (i - size) : 0; + // we don't catch unaligned partially OOB accesses. + if (i % sizeof(T)) continue; + sprintf(expected_str, str, off); + EXPECT_DEATH(oob_test<T>(size, i), expected_str); + } + } + + EXPECT_DEATH(oob_test<T>(kLargeMalloc, -1), + "is located.*1 byte.*to the left"); + EXPECT_DEATH(oob_test<T>(kLargeMalloc, kLargeMalloc), + "is located.*0 byte.*to the right"); +} + +// TODO(glider): the following tests are EXTREMELY slow on Darwin: +// AddressSanitizer.OOB_char (125503 ms) +// AddressSanitizer.OOB_int (126890 ms) +// AddressSanitizer.OOBRightTest (315605 ms) +// AddressSanitizer.SimpleStackTest (366559 ms) + +TEST(AddressSanitizer, OOB_char) { + OOBTest<U1>(); +} + +TEST(AddressSanitizer, OOB_int) { + OOBTest<U4>(); +} + +TEST(AddressSanitizer, OOBRightTest) { + for (size_t access_size = 1; access_size <= 8; access_size *= 2) { + for (size_t alloc_size = 1; alloc_size <= 8; alloc_size++) { + for (size_t offset = 0; offset <= 8; offset += access_size) { + void *p = malloc(alloc_size); + // allocated: [p, p + alloc_size) + // accessed: [p + offset, p + offset + access_size) + uint8_t *addr = (uint8_t*)p + offset; + if (offset + access_size <= alloc_size) { + asan_write_sized_aligned(addr, access_size); + } else { + int outside_bytes = offset > alloc_size ? 
(offset - alloc_size) : 0; + const char *str = + "is located.%d *byte.*to the right"; + char expected_str[100]; + sprintf(expected_str, str, outside_bytes); + EXPECT_DEATH(asan_write_sized_aligned(addr, access_size), + expected_str); + } + free(p); + } + } + } +} + +TEST(AddressSanitizer, LargeOOBRightTest) { + size_t large_power_of_two = 1 << 19; + for (size_t i = 16; i <= 256; i *= 2) { + size_t size = large_power_of_two - i; + char *p = Ident(new char[size]); + EXPECT_DEATH(p[size] = 0, "is located 0 bytes to the right"); + delete [] p; + } +} + +TEST(AddressSanitizer, DISABLED_DemoOOBLeftLow) { + oob_test<U1>(10, -1); +} + +TEST(AddressSanitizer, DISABLED_DemoOOBLeftHigh) { + oob_test<U1>(kLargeMalloc, -1); +} + +TEST(AddressSanitizer, DISABLED_DemoOOBRightLow) { + oob_test<U1>(10, 10); +} + +TEST(AddressSanitizer, DISABLED_DemoOOBRightHigh) { + oob_test<U1>(kLargeMalloc, kLargeMalloc); +} diff --git a/lib/asan/tests/asan_str_test.cc b/lib/asan/tests/asan_str_test.cc new file mode 100644 index 000000000000..128fb61c25a9 --- /dev/null +++ b/lib/asan/tests/asan_str_test.cc @@ -0,0 +1,572 @@ +//=-- asan_str_test.cc ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +//===----------------------------------------------------------------------===// +#include "asan_test_utils.h" + +// Used for string functions tests +static char global_string[] = "global"; +static size_t global_string_length = 6; + +// Input to a test is a zero-terminated string str with given length +// Accesses to the bytes to the left and to the right of str +// are presumed to produce OOB errors +void StrLenOOBTestTemplate(char *str, size_t length, bool is_global) { + // Normal strlen calls + EXPECT_EQ(strlen(str), length); + if (length > 0) { + EXPECT_EQ(length - 1, strlen(str + 1)); + EXPECT_EQ(0U, strlen(str + length)); + } + // Arg of strlen is not malloced, OOB access + if (!is_global) { + // We don't insert RedZones to the left of global variables + EXPECT_DEATH(Ident(strlen(str - 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strlen(str - 5)), LeftOOBReadMessage(5)); + } + EXPECT_DEATH(Ident(strlen(str + length + 1)), RightOOBReadMessage(0)); + // Overwrite terminator + str[length] = 'a'; + // String is not zero-terminated, strlen will lead to OOB access + EXPECT_DEATH(Ident(strlen(str)), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(strlen(str + length)), RightOOBReadMessage(0)); + // Restore terminator + str[length] = 0; +} +TEST(AddressSanitizer, StrLenOOBTest) { + // Check heap-allocated string + size_t length = Ident(10); + char *heap_string = Ident((char*)malloc(length + 1)); + char stack_string[10 + 1]; + break_optimization(&stack_string); + for (size_t i = 0; i < length; i++) { + heap_string[i] = 'a'; + stack_string[i] = 'b'; + } + heap_string[length] = 0; + stack_string[length] = 0; + StrLenOOBTestTemplate(heap_string, length, false); + // TODO(samsonov): Fix expected messages in StrLenOOBTestTemplate to + // make test for stack_string work. Or move it to output tests. 
+ // StrLenOOBTestTemplate(stack_string, length, false); + StrLenOOBTestTemplate(global_string, global_string_length, true); + free(heap_string); +} + +#ifndef __APPLE__ +TEST(AddressSanitizer, StrNLenOOBTest) { + size_t size = Ident(123); + char *str = MallocAndMemsetString(size); + // Normal strnlen calls. + Ident(strnlen(str - 1, 0)); + Ident(strnlen(str, size)); + Ident(strnlen(str + size - 1, 1)); + str[size - 1] = '\0'; + Ident(strnlen(str, 2 * size)); + // Argument points to not allocated memory. + EXPECT_DEATH(Ident(strnlen(str - 1, 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strnlen(str + size, 1)), RightOOBReadMessage(0)); + // Overwrite the terminating '\0' and hit unallocated memory. + str[size - 1] = 'z'; + EXPECT_DEATH(Ident(strnlen(str, size + 1)), RightOOBReadMessage(0)); + free(str); +} +#endif + +TEST(AddressSanitizer, StrDupOOBTest) { + size_t size = Ident(42); + char *str = MallocAndMemsetString(size); + char *new_str; + // Normal strdup calls. + str[size - 1] = '\0'; + new_str = strdup(str); + free(new_str); + new_str = strdup(str + size - 1); + free(new_str); + // Argument points to not allocated memory. + EXPECT_DEATH(Ident(strdup(str - 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strdup(str + size)), RightOOBReadMessage(0)); + // Overwrite the terminating '\0' and hit unallocated memory. + str[size - 1] = 'z'; + EXPECT_DEATH(Ident(strdup(str)), RightOOBReadMessage(0)); + free(str); +} + +TEST(AddressSanitizer, StrCpyOOBTest) { + size_t to_size = Ident(30); + size_t from_size = Ident(6); // less than to_size + char *to = Ident((char*)malloc(to_size)); + char *from = Ident((char*)malloc(from_size)); + // Normal strcpy calls. + strcpy(from, "hello"); + strcpy(to, from); + strcpy(to + to_size - from_size, from); + // Length of "from" is too small. + EXPECT_DEATH(Ident(strcpy(from, "hello2")), RightOOBWriteMessage(0)); + // "to" or "from" points to not allocated memory. + EXPECT_DEATH(Ident(strcpy(to - 1, from)), LeftOOBWriteMessage(1)); + EXPECT_DEATH(Ident(strcpy(to, from - 1)), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strcpy(to, from + from_size)), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(strcpy(to + to_size, from)), RightOOBWriteMessage(0)); + // Overwrite the terminating '\0' character and hit unallocated memory. 
+ from[from_size - 1] = '!'; + EXPECT_DEATH(Ident(strcpy(to, from)), RightOOBReadMessage(0)); + free(to); + free(from); +} + +TEST(AddressSanitizer, StrNCpyOOBTest) { + size_t to_size = Ident(20); + size_t from_size = Ident(6); // less than to_size + char *to = Ident((char*)malloc(to_size)); + // From is a zero-terminated string "hello\0" of length 6 + char *from = Ident((char*)malloc(from_size)); + strcpy(from, "hello"); + // copy 0 bytes + strncpy(to, from, 0); + strncpy(to - 1, from - 1, 0); + // normal strncpy calls + strncpy(to, from, from_size); + strncpy(to, from, to_size); + strncpy(to, from + from_size - 1, to_size); + strncpy(to + to_size - 1, from, 1); + // One of {to, from} points to not allocated memory + EXPECT_DEATH(Ident(strncpy(to, from - 1, from_size)), + LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(strncpy(to - 1, from, from_size)), + LeftOOBWriteMessage(1)); + EXPECT_DEATH(Ident(strncpy(to, from + from_size, 1)), + RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(strncpy(to + to_size, from, 1)), + RightOOBWriteMessage(0)); + // Length of "to" is too small + EXPECT_DEATH(Ident(strncpy(to + to_size - from_size + 1, from, from_size)), + RightOOBWriteMessage(0)); + EXPECT_DEATH(Ident(strncpy(to + 1, from, to_size)), + RightOOBWriteMessage(0)); + // Overwrite terminator in from + from[from_size - 1] = '!'; + // normal strncpy call + strncpy(to, from, from_size); + // Length of "from" is too small + EXPECT_DEATH(Ident(strncpy(to, from, to_size)), + RightOOBReadMessage(0)); + free(to); + free(from); +} + +// Users may have different definitions of "strchr" and "index", so provide +// function pointer typedefs and overload RunStrChrTest implementation. +// We can't use macro for RunStrChrTest body here, as this macro would +// confuse EXPECT_DEATH gtest macro. +typedef char*(*PointerToStrChr1)(const char*, int); +typedef char*(*PointerToStrChr2)(char*, int); + +USED static void RunStrChrTest(PointerToStrChr1 StrChr) { + size_t size = Ident(100); + char *str = MallocAndMemsetString(size); + str[10] = 'q'; + str[11] = '\0'; + EXPECT_EQ(str, StrChr(str, 'z')); + EXPECT_EQ(str + 10, StrChr(str, 'q')); + EXPECT_EQ(NULL, StrChr(str, 'a')); + // StrChr argument points to not allocated memory. + EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0)); + // Overwrite the terminator and hit not allocated memory. + str[11] = 'z'; + EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0)); + free(str); +} +USED static void RunStrChrTest(PointerToStrChr2 StrChr) { + size_t size = Ident(100); + char *str = MallocAndMemsetString(size); + str[10] = 'q'; + str[11] = '\0'; + EXPECT_EQ(str, StrChr(str, 'z')); + EXPECT_EQ(str + 10, StrChr(str, 'q')); + EXPECT_EQ(NULL, StrChr(str, 'a')); + // StrChr argument points to not allocated memory. + EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0)); + // Overwrite the terminator and hit not allocated memory. 
+ str[11] = 'z'; + EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0)); + free(str); +} + +TEST(AddressSanitizer, StrChrAndIndexOOBTest) { + RunStrChrTest(&strchr); + RunStrChrTest(&index); +} + +TEST(AddressSanitizer, StrCmpAndFriendsLogicTest) { + // strcmp + EXPECT_EQ(0, strcmp("", "")); + EXPECT_EQ(0, strcmp("abcd", "abcd")); + EXPECT_GT(0, strcmp("ab", "ac")); + EXPECT_GT(0, strcmp("abc", "abcd")); + EXPECT_LT(0, strcmp("acc", "abc")); + EXPECT_LT(0, strcmp("abcd", "abc")); + + // strncmp + EXPECT_EQ(0, strncmp("a", "b", 0)); + EXPECT_EQ(0, strncmp("abcd", "abcd", 10)); + EXPECT_EQ(0, strncmp("abcd", "abcef", 3)); + EXPECT_GT(0, strncmp("abcde", "abcfa", 4)); + EXPECT_GT(0, strncmp("a", "b", 5)); + EXPECT_GT(0, strncmp("bc", "bcde", 4)); + EXPECT_LT(0, strncmp("xyz", "xyy", 10)); + EXPECT_LT(0, strncmp("baa", "aaa", 1)); + EXPECT_LT(0, strncmp("zyx", "", 2)); + + // strcasecmp + EXPECT_EQ(0, strcasecmp("", "")); + EXPECT_EQ(0, strcasecmp("zzz", "zzz")); + EXPECT_EQ(0, strcasecmp("abCD", "ABcd")); + EXPECT_GT(0, strcasecmp("aB", "Ac")); + EXPECT_GT(0, strcasecmp("ABC", "ABCd")); + EXPECT_LT(0, strcasecmp("acc", "abc")); + EXPECT_LT(0, strcasecmp("ABCd", "abc")); + + // strncasecmp + EXPECT_EQ(0, strncasecmp("a", "b", 0)); + EXPECT_EQ(0, strncasecmp("abCD", "ABcd", 10)); + EXPECT_EQ(0, strncasecmp("abCd", "ABcef", 3)); + EXPECT_GT(0, strncasecmp("abcde", "ABCfa", 4)); + EXPECT_GT(0, strncasecmp("a", "B", 5)); + EXPECT_GT(0, strncasecmp("bc", "BCde", 4)); + EXPECT_LT(0, strncasecmp("xyz", "xyy", 10)); + EXPECT_LT(0, strncasecmp("Baa", "aaa", 1)); + EXPECT_LT(0, strncasecmp("zyx", "", 2)); + + // memcmp + EXPECT_EQ(0, memcmp("a", "b", 0)); + EXPECT_EQ(0, memcmp("ab\0c", "ab\0c", 4)); + EXPECT_GT(0, memcmp("\0ab", "\0ac", 3)); + EXPECT_GT(0, memcmp("abb\0", "abba", 4)); + EXPECT_LT(0, memcmp("ab\0cd", "ab\0c\0", 5)); + EXPECT_LT(0, memcmp("zza", "zyx", 3)); +} + +typedef int(*PointerToStrCmp)(const char*, const char*); +void RunStrCmpTest(PointerToStrCmp StrCmp) { + size_t size = Ident(100); + int fill = 'o'; + char *s1 = MallocAndMemsetString(size, fill); + char *s2 = MallocAndMemsetString(size, fill); + s1[size - 1] = '\0'; + s2[size - 1] = '\0'; + // Normal StrCmp calls + Ident(StrCmp(s1, s2)); + Ident(StrCmp(s1, s2 + size - 1)); + Ident(StrCmp(s1 + size - 1, s2 + size - 1)); + s1[size - 1] = 'z'; + s2[size - 1] = 'x'; + Ident(StrCmp(s1, s2)); + // One of arguments points to not allocated memory. + EXPECT_DEATH(Ident(StrCmp)(s1 - 1, s2), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrCmp)(s1, s2 - 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrCmp)(s1 + size, s2), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrCmp)(s1, s2 + size), RightOOBReadMessage(0)); + // Hit unallocated memory and die. 
+ s1[size - 1] = fill; + EXPECT_DEATH(Ident(StrCmp)(s1, s1), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrCmp)(s1 + size - 1, s2), RightOOBReadMessage(0)); + free(s1); + free(s2); +} + +TEST(AddressSanitizer, StrCmpOOBTest) { + RunStrCmpTest(&strcmp); +} + +TEST(AddressSanitizer, StrCaseCmpOOBTest) { + RunStrCmpTest(&strcasecmp); +} + +typedef int(*PointerToStrNCmp)(const char*, const char*, size_t); +void RunStrNCmpTest(PointerToStrNCmp StrNCmp) { + size_t size = Ident(100); + char *s1 = MallocAndMemsetString(size); + char *s2 = MallocAndMemsetString(size); + s1[size - 1] = '\0'; + s2[size - 1] = '\0'; + // Normal StrNCmp calls + Ident(StrNCmp(s1, s2, size + 2)); + s1[size - 1] = 'z'; + s2[size - 1] = 'x'; + Ident(StrNCmp(s1 + size - 2, s2 + size - 2, size)); + s2[size - 1] = 'z'; + Ident(StrNCmp(s1 - 1, s2 - 1, 0)); + Ident(StrNCmp(s1 + size - 1, s2 + size - 1, 1)); + // One of arguments points to not allocated memory. + EXPECT_DEATH(Ident(StrNCmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrNCmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(Ident(StrNCmp)(s1 + size, s2, 1), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrNCmp)(s1, s2 + size, 1), RightOOBReadMessage(0)); + // Hit unallocated memory and die. + EXPECT_DEATH(Ident(StrNCmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0)); + EXPECT_DEATH(Ident(StrNCmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0)); + free(s1); + free(s2); +} + +TEST(AddressSanitizer, StrNCmpOOBTest) { + RunStrNCmpTest(&strncmp); +} + +TEST(AddressSanitizer, StrNCaseCmpOOBTest) { + RunStrNCmpTest(&strncasecmp); +} +TEST(AddressSanitizer, StrCatOOBTest) { + // strcat() reads strlen(to) bytes from |to| before concatenating. + size_t to_size = Ident(100); + char *to = MallocAndMemsetString(to_size); + to[0] = '\0'; + size_t from_size = Ident(20); + char *from = MallocAndMemsetString(from_size); + from[from_size - 1] = '\0'; + // Normal strcat calls. + strcat(to, from); + strcat(to, from); + strcat(to + from_size, from + from_size - 2); + // Passing an invalid pointer is an error even when concatenating an empty + // string. + EXPECT_DEATH(strcat(to - 1, from + from_size - 1), LeftOOBAccessMessage(1)); + // One of arguments points to not allocated memory. + EXPECT_DEATH(strcat(to - 1, from), LeftOOBAccessMessage(1)); + EXPECT_DEATH(strcat(to, from - 1), LeftOOBReadMessage(1)); + EXPECT_DEATH(strcat(to + to_size, from), RightOOBWriteMessage(0)); + EXPECT_DEATH(strcat(to, from + from_size), RightOOBReadMessage(0)); + + // "from" is not zero-terminated. + from[from_size - 1] = 'z'; + EXPECT_DEATH(strcat(to, from), RightOOBReadMessage(0)); + from[from_size - 1] = '\0'; + // "to" is not zero-terminated. + memset(to, 'z', to_size); + EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0)); + // "to" is too short to fit "from". + to[to_size - from_size + 1] = '\0'; + EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0)); + // length of "to" is just enough. + strcat(to, from + 1); + + free(to); + free(from); +} + +TEST(AddressSanitizer, StrNCatOOBTest) { + // strncat() reads strlen(to) bytes from |to| before concatenating. + size_t to_size = Ident(100); + char *to = MallocAndMemsetString(to_size); + to[0] = '\0'; + size_t from_size = Ident(20); + char *from = MallocAndMemsetString(from_size); + // Normal strncat calls. + strncat(to, from, 0); + strncat(to, from, from_size); + from[from_size - 1] = '\0'; + strncat(to, from, 2 * from_size); + // Catenating empty string with an invalid string is still an error. 
+ EXPECT_DEATH(strncat(to - 1, from, 0), LeftOOBAccessMessage(1)); + strncat(to, from + from_size - 1, 10); + // One of arguments points to not allocated memory. + EXPECT_DEATH(strncat(to - 1, from, 2), LeftOOBAccessMessage(1)); + EXPECT_DEATH(strncat(to, from - 1, 2), LeftOOBReadMessage(1)); + EXPECT_DEATH(strncat(to + to_size, from, 2), RightOOBWriteMessage(0)); + EXPECT_DEATH(strncat(to, from + from_size, 2), RightOOBReadMessage(0)); + + memset(from, 'z', from_size); + memset(to, 'z', to_size); + to[0] = '\0'; + // "from" is too short. + EXPECT_DEATH(strncat(to, from, from_size + 1), RightOOBReadMessage(0)); + // "to" is not zero-terminated. + EXPECT_DEATH(strncat(to + 1, from, 1), RightOOBWriteMessage(0)); + // "to" is too short to fit "from". + to[0] = 'z'; + to[to_size - from_size + 1] = '\0'; + EXPECT_DEATH(strncat(to, from, from_size - 1), RightOOBWriteMessage(0)); + // "to" is just enough. + strncat(to, from, from_size - 2); + + free(to); + free(from); +} + +static string OverlapErrorMessage(const string &func) { + return func + "-param-overlap"; +} + +TEST(AddressSanitizer, StrArgsOverlapTest) { + size_t size = Ident(100); + char *str = Ident((char*)malloc(size)); + +// Do not check memcpy() on OS X 10.7 and later, where it actually aliases +// memmove(). +#if !defined(__APPLE__) || !defined(MAC_OS_X_VERSION_10_7) || \ + (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7) + // Check "memcpy". Use Ident() to avoid inlining. + memset(str, 'z', size); + Ident(memcpy)(str + 1, str + 11, 10); + Ident(memcpy)(str, str, 0); + EXPECT_DEATH(Ident(memcpy)(str, str + 14, 15), OverlapErrorMessage("memcpy")); + EXPECT_DEATH(Ident(memcpy)(str + 14, str, 15), OverlapErrorMessage("memcpy")); +#endif + + // We do not treat memcpy with to==from as a bug. + // See http://llvm.org/bugs/show_bug.cgi?id=11763. + // EXPECT_DEATH(Ident(memcpy)(str + 20, str + 20, 1), + // OverlapErrorMessage("memcpy")); + + // Check "strcpy". + memset(str, 'z', size); + str[9] = '\0'; + strcpy(str + 10, str); + EXPECT_DEATH(strcpy(str + 9, str), OverlapErrorMessage("strcpy")); + EXPECT_DEATH(strcpy(str, str + 4), OverlapErrorMessage("strcpy")); + strcpy(str, str + 5); + + // Check "strncpy". + memset(str, 'z', size); + strncpy(str, str + 10, 10); + EXPECT_DEATH(strncpy(str, str + 9, 10), OverlapErrorMessage("strncpy")); + EXPECT_DEATH(strncpy(str + 9, str, 10), OverlapErrorMessage("strncpy")); + str[10] = '\0'; + strncpy(str + 11, str, 20); + EXPECT_DEATH(strncpy(str + 10, str, 20), OverlapErrorMessage("strncpy")); + + // Check "strcat". + memset(str, 'z', size); + str[10] = '\0'; + str[20] = '\0'; + strcat(str, str + 10); + EXPECT_DEATH(strcat(str, str + 11), OverlapErrorMessage("strcat")); + str[10] = '\0'; + strcat(str + 11, str); + EXPECT_DEATH(strcat(str, str + 9), OverlapErrorMessage("strcat")); + EXPECT_DEATH(strcat(str + 9, str), OverlapErrorMessage("strcat")); + EXPECT_DEATH(strcat(str + 10, str), OverlapErrorMessage("strcat")); + + // Check "strncat". 
+ memset(str, 'z', size); + str[10] = '\0'; + strncat(str, str + 10, 10); // from is empty + EXPECT_DEATH(strncat(str, str + 11, 10), OverlapErrorMessage("strncat")); + str[10] = '\0'; + str[20] = '\0'; + strncat(str + 5, str, 5); + str[10] = '\0'; + EXPECT_DEATH(strncat(str + 5, str, 6), OverlapErrorMessage("strncat")); + EXPECT_DEATH(strncat(str, str + 9, 10), OverlapErrorMessage("strncat")); + + free(str); +} + +void CallAtoi(const char *nptr) { + Ident(atoi(nptr)); +} +void CallAtol(const char *nptr) { + Ident(atol(nptr)); +} +void CallAtoll(const char *nptr) { + Ident(atoll(nptr)); +} +typedef void(*PointerToCallAtoi)(const char*); + +void RunAtoiOOBTest(PointerToCallAtoi Atoi) { + char *array = MallocAndMemsetString(10, '1'); + // Invalid pointer to the string. + EXPECT_DEATH(Atoi(array + 11), RightOOBReadMessage(1)); + EXPECT_DEATH(Atoi(array - 1), LeftOOBReadMessage(1)); + // Die if a buffer doesn't have terminating NULL. + EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); + // Make last symbol a terminating NULL or other non-digit. + array[9] = '\0'; + Atoi(array); + array[9] = 'a'; + Atoi(array); + Atoi(array + 9); + // Sometimes we need to detect overflow if no digits are found. + memset(array, ' ', 10); + EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); + array[9] = '-'; + EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); + EXPECT_DEATH(Atoi(array + 9), RightOOBReadMessage(0)); + array[8] = '-'; + Atoi(array); + free(array); +} + +TEST(AddressSanitizer, AtoiAndFriendsOOBTest) { + RunAtoiOOBTest(&CallAtoi); + RunAtoiOOBTest(&CallAtol); + RunAtoiOOBTest(&CallAtoll); +} + +void CallStrtol(const char *nptr, char **endptr, int base) { + Ident(strtol(nptr, endptr, base)); +} +void CallStrtoll(const char *nptr, char **endptr, int base) { + Ident(strtoll(nptr, endptr, base)); +} +typedef void(*PointerToCallStrtol)(const char*, char**, int); + +void RunStrtolOOBTest(PointerToCallStrtol Strtol) { + char *array = MallocAndMemsetString(3); + char *endptr = NULL; + array[0] = '1'; + array[1] = '2'; + array[2] = '3'; + // Invalid pointer to the string. + EXPECT_DEATH(Strtol(array + 3, NULL, 0), RightOOBReadMessage(0)); + EXPECT_DEATH(Strtol(array - 1, NULL, 0), LeftOOBReadMessage(1)); + // Buffer overflow if there is no terminating null (depends on base). + Strtol(array, &endptr, 3); + EXPECT_EQ(array + 2, endptr); + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[2] = 'z'; + Strtol(array, &endptr, 35); + EXPECT_EQ(array + 2, endptr); + EXPECT_DEATH(Strtol(array, NULL, 36), RightOOBReadMessage(0)); + // Add terminating zero to get rid of overflow. + array[2] = '\0'; + Strtol(array, NULL, 36); + // Don't check for overflow if base is invalid. + Strtol(array - 1, NULL, -1); + Strtol(array + 3, NULL, 1); + // Sometimes we need to detect overflow if no digits are found. 
+ array[0] = array[1] = array[2] = ' '; + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[2] = '+'; + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[2] = '-'; + EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); + array[1] = '+'; + Strtol(array, NULL, 0); + array[1] = array[2] = 'z'; + Strtol(array, &endptr, 0); + EXPECT_EQ(array, endptr); + Strtol(array + 2, NULL, 0); + EXPECT_EQ(array, endptr); + free(array); +} + +TEST(AddressSanitizer, StrtollOOBTest) { + RunStrtolOOBTest(&CallStrtoll); +} +TEST(AddressSanitizer, StrtolOOBTest) { + RunStrtolOOBTest(&CallStrtol); +} + + diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc index 5fa65b2af5dc..5ae525de9876 100644 --- a/lib/asan/tests/asan_test.cc +++ b/lib/asan/tests/asan_test.cc @@ -10,69 +10,8 @@ // This file is a part of AddressSanitizer, an address sanity checker. // //===----------------------------------------------------------------------===// -#include <stdio.h> -#include <signal.h> -#include <stdlib.h> -#include <string.h> -#include <strings.h> -#include <pthread.h> -#include <stdint.h> -#include <setjmp.h> -#include <assert.h> -#include <algorithm> - -#ifdef __linux__ -# include <sys/prctl.h> -# include <sys/types.h> -# include <sys/stat.h> -# include <fcntl.h> -#include <unistd.h> -#endif - -#if defined(__i386__) || defined(__x86_64__) -#include <emmintrin.h> -#endif - #include "asan_test_utils.h" -#ifndef __APPLE__ -#include <malloc.h> -#else -#include <malloc/malloc.h> -#include <AvailabilityMacros.h> // For MAC_OS_X_VERSION_* -#include <CoreFoundation/CFString.h> -#endif // __APPLE__ - -#if ASAN_HAS_EXCEPTIONS -# define ASAN_THROW(x) throw (x) -#else -# define ASAN_THROW(x) -#endif - -#include <sys/mman.h> - -typedef uint8_t U1; -typedef uint16_t U2; -typedef uint32_t U4; -typedef uint64_t U8; - -static const int kPageSize = 4096; - -const size_t kLargeMalloc = 1 << 24; - -template<typename T> -NOINLINE void asan_write(T *a) { - *a = 0; -} - -NOINLINE void asan_write_sized_aligned(uint8_t *p, size_t size) { - EXPECT_EQ(0U, ((uintptr_t)p % size)); - if (size == 1) asan_write((uint8_t*)p); - else if (size == 2) asan_write((uint16_t*)p); - else if (size == 4) asan_write((uint32_t*)p); - else if (size == 8) asan_write((uint64_t*)p); -} - NOINLINE void *malloc_fff(size_t size) { void *res = malloc/**/(size); break_optimization(0); return res;} NOINLINE void *malloc_eee(size_t size) { @@ -106,15 +45,6 @@ NOINLINE void free_ccc(void *p) { free(p); break_optimization(0);} NOINLINE void free_bbb(void *p) { free_ccc(p); break_optimization(0);} NOINLINE void free_aaa(void *p) { free_bbb(p); break_optimization(0);} -template<typename T> -NOINLINE void oob_test(int size, int off) { - char *p = (char*)malloc_aaa(size); - // fprintf(stderr, "writing %d byte(s) into [%p,%p) with offset %d\n", - // sizeof(T), p, p + size, off); - asan_write((T*)(p + off)); - free_aaa(p); -} - template<typename T> NOINLINE void uaf_test(int size, int off) { @@ -231,88 +161,6 @@ TEST(AddressSanitizer, DISABLED_TSDTest) { pthread_key_delete(test_key); } -template<typename T> -void OOBTest() { - char expected_str[100]; - for (int size = sizeof(T); size < 20; size += 5) { - for (int i = -5; i < 0; i++) { - const char *str = - "is located.*%d byte.*to the left"; - sprintf(expected_str, str, abs(i)); - EXPECT_DEATH(oob_test<T>(size, i), expected_str); - } - - for (int i = 0; i < (int)(size - sizeof(T) + 1); i++) - oob_test<T>(size, i); - - for (int i = size - sizeof(T) + 1; i <= (int)(size + 
2 * sizeof(T)); i++) { - const char *str = - "is located.*%d byte.*to the right"; - int off = i >= size ? (i - size) : 0; - // we don't catch unaligned partially OOB accesses. - if (i % sizeof(T)) continue; - sprintf(expected_str, str, off); - EXPECT_DEATH(oob_test<T>(size, i), expected_str); - } - } - - EXPECT_DEATH(oob_test<T>(kLargeMalloc, -1), - "is located.*1 byte.*to the left"); - EXPECT_DEATH(oob_test<T>(kLargeMalloc, kLargeMalloc), - "is located.*0 byte.*to the right"); -} - -// TODO(glider): the following tests are EXTREMELY slow on Darwin: -// AddressSanitizer.OOB_char (125503 ms) -// AddressSanitizer.OOB_int (126890 ms) -// AddressSanitizer.OOBRightTest (315605 ms) -// AddressSanitizer.SimpleStackTest (366559 ms) - -TEST(AddressSanitizer, OOB_char) { - OOBTest<U1>(); -} - -TEST(AddressSanitizer, OOB_int) { - OOBTest<U4>(); -} - -TEST(AddressSanitizer, OOBRightTest) { - for (size_t access_size = 1; access_size <= 8; access_size *= 2) { - for (size_t alloc_size = 1; alloc_size <= 8; alloc_size++) { - for (size_t offset = 0; offset <= 8; offset += access_size) { - void *p = malloc(alloc_size); - // allocated: [p, p + alloc_size) - // accessed: [p + offset, p + offset + access_size) - uint8_t *addr = (uint8_t*)p + offset; - if (offset + access_size <= alloc_size) { - asan_write_sized_aligned(addr, access_size); - } else { - int outside_bytes = offset > alloc_size ? (offset - alloc_size) : 0; - const char *str = - "is located.%d *byte.*to the right"; - char expected_str[100]; - sprintf(expected_str, str, outside_bytes); - EXPECT_DEATH(asan_write_sized_aligned(addr, access_size), - expected_str); - } - free(p); - } - } - } -} - -#if ASAN_ALLOCATOR_VERSION == 2 // Broken with the asan_allocator1 -TEST(AddressSanitizer, LargeOOBRightTest) { - size_t large_power_of_two = 1 << 19; - for (size_t i = 16; i <= 256; i *= 2) { - size_t size = large_power_of_two - i; - char *p = Ident(new char[size]); - EXPECT_DEATH(p[size] = 0, "is located 0 bytes to the right"); - delete [] p; - } -} -#endif // ASAN_ALLOCATOR_VERSION == 2 - TEST(AddressSanitizer, UAF_char) { const char *uaf_string = "AddressSanitizer:.*heap-use-after-free"; EXPECT_DEATH(uaf_test<U1>(1, 0), uaf_string); @@ -322,6 +170,27 @@ TEST(AddressSanitizer, UAF_char) { EXPECT_DEATH(uaf_test<U1>(kLargeMalloc, kLargeMalloc / 2), uaf_string); } +TEST(AddressSanitizer, UAF_long_double) { + if (sizeof(long double) == sizeof(double)) return; + long double *p = Ident(new long double[10]); + EXPECT_DEATH(Ident(p)[12] = 0, "WRITE of size 1[06]"); + EXPECT_DEATH(Ident(p)[0] = Ident(p)[12], "READ of size 1[06]"); + delete [] Ident(p); +} + +struct Packed5 { + int x; + char c; +} __attribute__((packed)); + + +TEST(AddressSanitizer, UAF_Packed5) { + Packed5 *p = Ident(new Packed5[2]); + EXPECT_DEATH(p[0] = p[3], "READ of size 5"); + EXPECT_DEATH(p[3] = p[0], "WRITE of size 5"); + delete [] Ident(p); +} + #if ASAN_HAS_BLACKLIST TEST(AddressSanitizer, IgnoreTest) { int *x = Ident(new int); @@ -408,41 +277,6 @@ TEST(AddressSanitizer, SignalTest) { } // namespace #endif -static void MallocStress(size_t n) { - uint32_t seed = my_rand(); - for (size_t iter = 0; iter < 10; iter++) { - vector<void *> vec; - for (size_t i = 0; i < n; i++) { - if ((i % 3) == 0) { - if (vec.empty()) continue; - size_t idx = my_rand_r(&seed) % vec.size(); - void *ptr = vec[idx]; - vec[idx] = vec.back(); - vec.pop_back(); - free_aaa(ptr); - } else { - size_t size = my_rand_r(&seed) % 1000 + 1; -#ifndef __APPLE__ - size_t alignment = 1 << (my_rand_r(&seed) % 7 + 3); - char *ptr = 
(char*)memalign_aaa(alignment, size); -#else - char *ptr = (char*) malloc_aaa(size); -#endif - vec.push_back(ptr); - ptr[0] = 0; - ptr[size-1] = 0; - ptr[size/2] = 0; - } - } - for (size_t i = 0; i < vec.size(); i++) - free_aaa(vec[i]); - } -} - -TEST(AddressSanitizer, MallocStressTest) { - MallocStress((ASAN_LOW_MEMORY) ? 20000 : 200000); -} - static void TestLargeMalloc(size_t size) { char buff[1024]; sprintf(buff, "is located 1 bytes to the left of %lu-byte", (long)size); @@ -450,26 +284,17 @@ static void TestLargeMalloc(size_t size) { } TEST(AddressSanitizer, LargeMallocTest) { - for (int i = 113; i < (1 << 28); i = i * 2 + 13) { + const int max_size = (SANITIZER_WORDSIZE == 32) ? 1 << 26 : 1 << 28; + for (int i = 113; i < max_size; i = i * 2 + 13) { TestLargeMalloc(i); } } -#if ASAN_LOW_MEMORY != 1 TEST(AddressSanitizer, HugeMallocTest) { -#ifdef __APPLE__ - // It was empirically found out that 1215 megabytes is the maximum amount of - // memory available to the process under AddressSanitizer on 32-bit Mac 10.6. - // 32-bit Mac 10.7 gives even less (< 1G). - // (the libSystem malloc() allows allocating up to 2300 megabytes without - // ASan). - size_t n_megs = SANITIZER_WORDSIZE == 32 ? 500 : 4100; -#else - size_t n_megs = SANITIZER_WORDSIZE == 32 ? 2600 : 4100; -#endif + if (SANITIZER_WORDSIZE != 64) return; + size_t n_megs = 4100; TestLargeMalloc(n_megs << 20); } -#endif #ifndef __APPLE__ void MemalignRun(size_t align, size_t size, int idx) { @@ -489,19 +314,6 @@ TEST(AddressSanitizer, memalign) { } #endif -TEST(AddressSanitizer, ThreadedMallocStressTest) { - const int kNumThreads = 4; - const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000; - pthread_t t[kNumThreads]; - for (int i = 0; i < kNumThreads; i++) { - PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress, - (void*)kNumIterations); - } - for (int i = 0; i < kNumThreads; i++) { - PTHREAD_JOIN(t[i], 0); - } -} - void *ManyThreadsWorker(void *a) { for (int iter = 0; iter < 100; iter++) { for (size_t size = 100; size < 2000; size *= 2) { @@ -532,6 +344,45 @@ TEST(AddressSanitizer, ReallocTest) { (my_rand() % 1000 + kMinElem) * sizeof(int)); EXPECT_EQ(3, ptr[3]); } + free(ptr); + // Realloc pointer returned by malloc(0). + int *ptr2 = Ident((int*)malloc(0)); + ptr2 = Ident((int*)realloc(ptr2, sizeof(*ptr2))); + *ptr2 = 42; + EXPECT_EQ(42, *ptr2); + free(ptr2); +} + +TEST(AddressSanitizer, ReallocFreedPointerTest) { + void *ptr = Ident(malloc(42)); + ASSERT_TRUE(NULL != ptr); + free(ptr); + EXPECT_DEATH(ptr = realloc(ptr, 77), "attempting double-free"); +} + +TEST(AddressSanitizer, ReallocInvalidPointerTest) { + void *ptr = Ident(malloc(42)); + EXPECT_DEATH(ptr = realloc((int*)ptr + 1, 77), "attempting free.*not malloc"); +} + +TEST(AddressSanitizer, ZeroSizeMallocTest) { + // Test that malloc(0) and similar functions don't return NULL. 
+ void *ptr = Ident(malloc(0)); + EXPECT_TRUE(NULL != ptr); + free(ptr); +#if !defined(__APPLE__) && !defined(ANDROID) && !defined(__ANDROID__) + int pm_res = posix_memalign(&ptr, 1<<20, 0); + EXPECT_EQ(0, pm_res); + EXPECT_TRUE(NULL != ptr); + free(ptr); +#endif + int *int_ptr = new int[0]; + int *int_ptr2 = new int[0]; + EXPECT_TRUE(NULL != int_ptr); + EXPECT_TRUE(NULL != int_ptr2); + EXPECT_NE(int_ptr, int_ptr2); + delete[] int_ptr; + delete[] int_ptr2; } #ifndef __APPLE__ @@ -561,8 +412,10 @@ void WrongFree() { } TEST(AddressSanitizer, WrongFreeTest) { - EXPECT_DEATH(WrongFree(), - "ERROR: AddressSanitizer: attempting free.*not malloc"); + EXPECT_DEATH(WrongFree(), ASAN_PCRE_DOTALL + "ERROR: AddressSanitizer: attempting free.*not malloc" + ".*is located 4 bytes inside of 400-byte region" + ".*allocated by thread"); } void DoubleFree() { @@ -624,6 +477,9 @@ TEST(AddressSanitizer, ManyStackObjectsTest) { EXPECT_DEATH(Ident(ZZZ)[-1] = 0, ASAN_PCRE_DOTALL "XXX.*YYY.*ZZZ"); } +#if 0 // This test requires online symbolizer. +// Moved to lit_tests/stack-oob-frames.cc. +// Reenable here once we have online symbolizer by default. NOINLINE static void Frame0(int frame, char *a, char *b, char *c) { char d[4] = {0}; char *D = Ident(d); @@ -659,6 +515,7 @@ TEST(AddressSanitizer, GuiltyStackFrame2Test) { TEST(AddressSanitizer, GuiltyStackFrame3Test) { EXPECT_DEATH(Frame3(3), "located .*in frame <.*Frame3"); } +#endif NOINLINE void LongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. @@ -722,7 +579,10 @@ TEST(AddressSanitizer, LongJmpTest) { } } -#if not defined(__ANDROID__) +#if !defined(__ANDROID__) && \ + !defined(__powerpc64__) && !defined(__powerpc__) +// Does not work on Power: +// https://code.google.com/p/address-sanitizer/issues/detail?id=185 TEST(AddressSanitizer, BuiltinLongJmpTest) { static jmp_buf buf; if (!__builtin_setjmp((void**)buf)) { @@ -817,23 +677,24 @@ TEST(AddressSanitizer, Store128Test) { } #endif -static string RightOOBErrorMessage(int oob_distance, bool is_write) { +string RightOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance >= 0); char expected_str[100]; - sprintf(expected_str, ASAN_PCRE_DOTALL "%s.*located %d bytes to the right", + sprintf(expected_str, ASAN_PCRE_DOTALL + "buffer-overflow.*%s.*located %d bytes to the right", is_write ? 
"WRITE" : "READ", oob_distance); return string(expected_str); } -static string RightOOBWriteMessage(int oob_distance) { +string RightOOBWriteMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/true); } -static string RightOOBReadMessage(int oob_distance) { +string RightOOBReadMessage(int oob_distance) { return RightOOBErrorMessage(oob_distance, /*is_write*/false); } -static string LeftOOBErrorMessage(int oob_distance, bool is_write) { +string LeftOOBErrorMessage(int oob_distance, bool is_write) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, ASAN_PCRE_DOTALL "%s.*located %d bytes to the left", @@ -841,830 +702,29 @@ static string LeftOOBErrorMessage(int oob_distance, bool is_write) { return string(expected_str); } -static string LeftOOBWriteMessage(int oob_distance) { +string LeftOOBWriteMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/true); } -static string LeftOOBReadMessage(int oob_distance) { +string LeftOOBReadMessage(int oob_distance) { return LeftOOBErrorMessage(oob_distance, /*is_write*/false); } -static string LeftOOBAccessMessage(int oob_distance) { +string LeftOOBAccessMessage(int oob_distance) { assert(oob_distance > 0); char expected_str[100]; sprintf(expected_str, "located %d bytes to the left", oob_distance); return string(expected_str); } -template<typename T> -void MemSetOOBTestTemplate(size_t length) { - if (length == 0) return; - size_t size = Ident(sizeof(T) * length); - T *array = Ident((T*)malloc(size)); - int element = Ident(42); - int zero = Ident(0); - void *(*MEMSET)(void *s, int c, size_t n) = Ident(memset); - // memset interval inside array - MEMSET(array, element, size); - MEMSET(array, element, size - 1); - MEMSET(array + length - 1, element, sizeof(T)); - MEMSET(array, element, 1); - - // memset 0 bytes - MEMSET(array - 10, element, zero); - MEMSET(array - 1, element, zero); - MEMSET(array, element, zero); - MEMSET(array + length, 0, zero); - MEMSET(array + length + 1, 0, zero); - - // try to memset bytes to the right of array - EXPECT_DEATH(MEMSET(array, 0, size + 1), - RightOOBWriteMessage(0)); - EXPECT_DEATH(MEMSET((char*)(array + length) - 1, element, 6), - RightOOBWriteMessage(0)); - EXPECT_DEATH(MEMSET(array + 1, element, size + sizeof(T)), - RightOOBWriteMessage(0)); - // whole interval is to the right - EXPECT_DEATH(MEMSET(array + length + 1, 0, 10), - RightOOBWriteMessage(sizeof(T))); - - // try to memset bytes to the left of array - EXPECT_DEATH(MEMSET((char*)array - 1, element, size), - LeftOOBWriteMessage(1)); - EXPECT_DEATH(MEMSET((char*)array - 5, 0, 6), - LeftOOBWriteMessage(5)); - if (length >= 100) { - // Large OOB, we find it only if the redzone is large enough. - EXPECT_DEATH(memset(array - 5, element, size + 5 * sizeof(T)), - LeftOOBWriteMessage(5 * sizeof(T))); - } - // whole interval is to the left - EXPECT_DEATH(MEMSET(array - 2, 0, sizeof(T)), - LeftOOBWriteMessage(2 * sizeof(T))); - - // try to memset bytes both to the left & to the right - EXPECT_DEATH(MEMSET((char*)array - 2, element, size + 4), - LeftOOBWriteMessage(2)); - - free(array); -} - -TEST(AddressSanitizer, MemSetOOBTest) { - MemSetOOBTestTemplate<char>(100); - MemSetOOBTestTemplate<int>(5); - MemSetOOBTestTemplate<double>(256); - // We can test arrays of structres/classes here, but what for? -} - -// Try to allocate two arrays of 'size' bytes that are near each other. 
-// Strictly speaking we are not guaranteed to find such two pointers, -// but given the structure of asan's allocator we will. -static bool AllocateTwoAdjacentArrays(char **x1, char **x2, size_t size) { - vector<char *> v; - bool res = false; - for (size_t i = 0; i < 1000U && !res; i++) { - v.push_back(new char[size]); - if (i == 0) continue; - sort(v.begin(), v.end()); - for (size_t j = 1; j < v.size(); j++) { - assert(v[j] > v[j-1]); - if ((size_t)(v[j] - v[j-1]) < size * 2) { - *x2 = v[j]; - *x1 = v[j-1]; - res = true; - break; - } - } - } - - for (size_t i = 0; i < v.size(); i++) { - if (res && v[i] == *x1) continue; - if (res && v[i] == *x2) continue; - delete [] v[i]; - } - return res; -} - -TEST(AddressSanitizer, LargeOOBInMemset) { - for (size_t size = 200; size < 100000; size += size / 2) { - char *x1, *x2; - if (!Ident(AllocateTwoAdjacentArrays)(&x1, &x2, size)) - continue; - // fprintf(stderr, " large oob memset: %p %p %zd\n", x1, x2, size); - // Do a memset on x1 with huge out-of-bound access that will end up in x2. - EXPECT_DEATH(Ident(memset)(x1, 0, size * 2), - "is located 0 bytes to the right"); - delete [] x1; - delete [] x2; - return; - } - assert(0 && "Did not find two adjacent malloc-ed pointers"); -} - -// Same test for memcpy and memmove functions -template <typename T, class M> -void MemTransferOOBTestTemplate(size_t length) { - if (length == 0) return; - size_t size = Ident(sizeof(T) * length); - T *src = Ident((T*)malloc(size)); - T *dest = Ident((T*)malloc(size)); - int zero = Ident(0); - - // valid transfer of bytes between arrays - M::transfer(dest, src, size); - M::transfer(dest + 1, src, size - sizeof(T)); - M::transfer(dest, src + length - 1, sizeof(T)); - M::transfer(dest, src, 1); - - // transfer zero bytes - M::transfer(dest - 1, src, 0); - M::transfer(dest + length, src, zero); - M::transfer(dest, src - 1, zero); - M::transfer(dest, src, zero); - - // try to change mem to the right of dest - EXPECT_DEATH(M::transfer(dest + 1, src, size), - RightOOBWriteMessage(0)); - EXPECT_DEATH(M::transfer((char*)(dest + length) - 1, src, 5), - RightOOBWriteMessage(0)); - - // try to change mem to the left of dest - EXPECT_DEATH(M::transfer(dest - 2, src, size), - LeftOOBWriteMessage(2 * sizeof(T))); - EXPECT_DEATH(M::transfer((char*)dest - 3, src, 4), - LeftOOBWriteMessage(3)); - - // try to access mem to the right of src - EXPECT_DEATH(M::transfer(dest, src + 2, size), - RightOOBReadMessage(0)); - EXPECT_DEATH(M::transfer(dest, (char*)(src + length) - 3, 6), - RightOOBReadMessage(0)); - - // try to access mem to the left of src - EXPECT_DEATH(M::transfer(dest, src - 1, size), - LeftOOBReadMessage(sizeof(T))); - EXPECT_DEATH(M::transfer(dest, (char*)src - 6, 7), - LeftOOBReadMessage(6)); - - // Generally we don't need to test cases where both accessing src and writing - // to dest address to poisoned memory. 
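A note on the expected distances in the removed template above: src and dest are typed T*, so pointer arithmetic is in elements, and an access through dest - 2 begins 2 * sizeof(T) bytes before the allocation; that is why it is paired with LeftOOBWriteMessage(2 * sizeof(T)), while the (char*)dest - 3 case expects a plain distance of 3. A minimal illustration of the same point (a sketch, not part of this patch, assuming only the standard memcpy):

    #include <string.h>

    // With T = int, dest - 2 is 8 bytes before the start of the buffer, so an
    // AddressSanitizer report for this call reads "8 bytes to the left of ...".
    void sketch_left_oob(int *dest, const int *src, size_t size_bytes) {
      memcpy(dest - 2, src, size_bytes);
    }
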
- - T *big_src = Ident((T*)malloc(size * 2)); - T *big_dest = Ident((T*)malloc(size * 2)); - // try to change mem to both sides of dest - EXPECT_DEATH(M::transfer(dest - 1, big_src, size * 2), - LeftOOBWriteMessage(sizeof(T))); - // try to access mem to both sides of src - EXPECT_DEATH(M::transfer(big_dest, src - 2, size * 2), - LeftOOBReadMessage(2 * sizeof(T))); - - free(src); - free(dest); - free(big_src); - free(big_dest); -} - -class MemCpyWrapper { - public: - static void* transfer(void *to, const void *from, size_t size) { - return Ident(memcpy)(to, from, size); - } -}; -TEST(AddressSanitizer, MemCpyOOBTest) { - MemTransferOOBTestTemplate<char, MemCpyWrapper>(100); - MemTransferOOBTestTemplate<int, MemCpyWrapper>(1024); -} - -class MemMoveWrapper { - public: - static void* transfer(void *to, const void *from, size_t size) { - return Ident(memmove)(to, from, size); - } -}; -TEST(AddressSanitizer, MemMoveOOBTest) { - MemTransferOOBTestTemplate<char, MemMoveWrapper>(100); - MemTransferOOBTestTemplate<int, MemMoveWrapper>(1024); -} - -// Tests for string functions - -// Used for string functions tests -static char global_string[] = "global"; -static size_t global_string_length = 6; - -// Input to a test is a zero-terminated string str with given length -// Accesses to the bytes to the left and to the right of str -// are presumed to produce OOB errors -void StrLenOOBTestTemplate(char *str, size_t length, bool is_global) { - // Normal strlen calls - EXPECT_EQ(strlen(str), length); - if (length > 0) { - EXPECT_EQ(length - 1, strlen(str + 1)); - EXPECT_EQ(0U, strlen(str + length)); - } - // Arg of strlen is not malloced, OOB access - if (!is_global) { - // We don't insert RedZones to the left of global variables - EXPECT_DEATH(Ident(strlen(str - 1)), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(strlen(str - 5)), LeftOOBReadMessage(5)); - } - EXPECT_DEATH(Ident(strlen(str + length + 1)), RightOOBReadMessage(0)); - // Overwrite terminator - str[length] = 'a'; - // String is not zero-terminated, strlen will lead to OOB access - EXPECT_DEATH(Ident(strlen(str)), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(strlen(str + length)), RightOOBReadMessage(0)); - // Restore terminator - str[length] = 0; -} -TEST(AddressSanitizer, StrLenOOBTest) { - // Check heap-allocated string - size_t length = Ident(10); - char *heap_string = Ident((char*)malloc(length + 1)); - char stack_string[10 + 1]; - break_optimization(&stack_string); - for (size_t i = 0; i < length; i++) { - heap_string[i] = 'a'; - stack_string[i] = 'b'; - } - heap_string[length] = 0; - stack_string[length] = 0; - StrLenOOBTestTemplate(heap_string, length, false); - // TODO(samsonov): Fix expected messages in StrLenOOBTestTemplate to - // make test for stack_string work. Or move it to output tests. - // StrLenOOBTestTemplate(stack_string, length, false); - StrLenOOBTestTemplate(global_string, global_string_length, true); - free(heap_string); -} - -static inline char* MallocAndMemsetString(size_t size, char ch) { +char* MallocAndMemsetString(size_t size, char ch) { char *s = Ident((char*)malloc(size)); memset(s, ch, size); return s; } -static inline char* MallocAndMemsetString(size_t size) { - return MallocAndMemsetString(size, 'z'); -} - -#ifndef __APPLE__ -TEST(AddressSanitizer, StrNLenOOBTest) { - size_t size = Ident(123); - char *str = MallocAndMemsetString(size); - // Normal strnlen calls. 
- Ident(strnlen(str - 1, 0)); - Ident(strnlen(str, size)); - Ident(strnlen(str + size - 1, 1)); - str[size - 1] = '\0'; - Ident(strnlen(str, 2 * size)); - // Argument points to not allocated memory. - EXPECT_DEATH(Ident(strnlen(str - 1, 1)), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(strnlen(str + size, 1)), RightOOBReadMessage(0)); - // Overwrite the terminating '\0' and hit unallocated memory. - str[size - 1] = 'z'; - EXPECT_DEATH(Ident(strnlen(str, size + 1)), RightOOBReadMessage(0)); - free(str); -} -#endif - -TEST(AddressSanitizer, StrDupOOBTest) { - size_t size = Ident(42); - char *str = MallocAndMemsetString(size); - char *new_str; - // Normal strdup calls. - str[size - 1] = '\0'; - new_str = strdup(str); - free(new_str); - new_str = strdup(str + size - 1); - free(new_str); - // Argument points to not allocated memory. - EXPECT_DEATH(Ident(strdup(str - 1)), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(strdup(str + size)), RightOOBReadMessage(0)); - // Overwrite the terminating '\0' and hit unallocated memory. - str[size - 1] = 'z'; - EXPECT_DEATH(Ident(strdup(str)), RightOOBReadMessage(0)); - free(str); -} - -TEST(AddressSanitizer, StrCpyOOBTest) { - size_t to_size = Ident(30); - size_t from_size = Ident(6); // less than to_size - char *to = Ident((char*)malloc(to_size)); - char *from = Ident((char*)malloc(from_size)); - // Normal strcpy calls. - strcpy(from, "hello"); - strcpy(to, from); - strcpy(to + to_size - from_size, from); - // Length of "from" is too small. - EXPECT_DEATH(Ident(strcpy(from, "hello2")), RightOOBWriteMessage(0)); - // "to" or "from" points to not allocated memory. - EXPECT_DEATH(Ident(strcpy(to - 1, from)), LeftOOBWriteMessage(1)); - EXPECT_DEATH(Ident(strcpy(to, from - 1)), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(strcpy(to, from + from_size)), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(strcpy(to + to_size, from)), RightOOBWriteMessage(0)); - // Overwrite the terminating '\0' character and hit unallocated memory. 
- from[from_size - 1] = '!'; - EXPECT_DEATH(Ident(strcpy(to, from)), RightOOBReadMessage(0)); - free(to); - free(from); -} - -TEST(AddressSanitizer, StrNCpyOOBTest) { - size_t to_size = Ident(20); - size_t from_size = Ident(6); // less than to_size - char *to = Ident((char*)malloc(to_size)); - // From is a zero-terminated string "hello\0" of length 6 - char *from = Ident((char*)malloc(from_size)); - strcpy(from, "hello"); - // copy 0 bytes - strncpy(to, from, 0); - strncpy(to - 1, from - 1, 0); - // normal strncpy calls - strncpy(to, from, from_size); - strncpy(to, from, to_size); - strncpy(to, from + from_size - 1, to_size); - strncpy(to + to_size - 1, from, 1); - // One of {to, from} points to not allocated memory - EXPECT_DEATH(Ident(strncpy(to, from - 1, from_size)), - LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(strncpy(to - 1, from, from_size)), - LeftOOBWriteMessage(1)); - EXPECT_DEATH(Ident(strncpy(to, from + from_size, 1)), - RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(strncpy(to + to_size, from, 1)), - RightOOBWriteMessage(0)); - // Length of "to" is too small - EXPECT_DEATH(Ident(strncpy(to + to_size - from_size + 1, from, from_size)), - RightOOBWriteMessage(0)); - EXPECT_DEATH(Ident(strncpy(to + 1, from, to_size)), - RightOOBWriteMessage(0)); - // Overwrite terminator in from - from[from_size - 1] = '!'; - // normal strncpy call - strncpy(to, from, from_size); - // Length of "from" is too small - EXPECT_DEATH(Ident(strncpy(to, from, to_size)), - RightOOBReadMessage(0)); - free(to); - free(from); -} - -// Users may have different definitions of "strchr" and "index", so provide -// function pointer typedefs and overload RunStrChrTest implementation. -// We can't use macro for RunStrChrTest body here, as this macro would -// confuse EXPECT_DEATH gtest macro. -typedef char*(*PointerToStrChr1)(const char*, int); -typedef char*(*PointerToStrChr2)(char*, int); - -USED static void RunStrChrTest(PointerToStrChr1 StrChr) { - size_t size = Ident(100); - char *str = MallocAndMemsetString(size); - str[10] = 'q'; - str[11] = '\0'; - EXPECT_EQ(str, StrChr(str, 'z')); - EXPECT_EQ(str + 10, StrChr(str, 'q')); - EXPECT_EQ(NULL, StrChr(str, 'a')); - // StrChr argument points to not allocated memory. - EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0)); - // Overwrite the terminator and hit not allocated memory. - str[11] = 'z'; - EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0)); - free(str); -} -USED static void RunStrChrTest(PointerToStrChr2 StrChr) { - size_t size = Ident(100); - char *str = MallocAndMemsetString(size); - str[10] = 'q'; - str[11] = '\0'; - EXPECT_EQ(str, StrChr(str, 'z')); - EXPECT_EQ(str + 10, StrChr(str, 'q')); - EXPECT_EQ(NULL, StrChr(str, 'a')); - // StrChr argument points to not allocated memory. - EXPECT_DEATH(Ident(StrChr(str - 1, 'z')), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(StrChr(str + size, 'z')), RightOOBReadMessage(0)); - // Overwrite the terminator and hit not allocated memory. 
- str[11] = 'z'; - EXPECT_DEATH(Ident(StrChr(str, 'a')), RightOOBReadMessage(0)); - free(str); -} - -TEST(AddressSanitizer, StrChrAndIndexOOBTest) { - RunStrChrTest(&strchr); - RunStrChrTest(&index); -} - -TEST(AddressSanitizer, StrCmpAndFriendsLogicTest) { - // strcmp - EXPECT_EQ(0, strcmp("", "")); - EXPECT_EQ(0, strcmp("abcd", "abcd")); - EXPECT_GT(0, strcmp("ab", "ac")); - EXPECT_GT(0, strcmp("abc", "abcd")); - EXPECT_LT(0, strcmp("acc", "abc")); - EXPECT_LT(0, strcmp("abcd", "abc")); - - // strncmp - EXPECT_EQ(0, strncmp("a", "b", 0)); - EXPECT_EQ(0, strncmp("abcd", "abcd", 10)); - EXPECT_EQ(0, strncmp("abcd", "abcef", 3)); - EXPECT_GT(0, strncmp("abcde", "abcfa", 4)); - EXPECT_GT(0, strncmp("a", "b", 5)); - EXPECT_GT(0, strncmp("bc", "bcde", 4)); - EXPECT_LT(0, strncmp("xyz", "xyy", 10)); - EXPECT_LT(0, strncmp("baa", "aaa", 1)); - EXPECT_LT(0, strncmp("zyx", "", 2)); - - // strcasecmp - EXPECT_EQ(0, strcasecmp("", "")); - EXPECT_EQ(0, strcasecmp("zzz", "zzz")); - EXPECT_EQ(0, strcasecmp("abCD", "ABcd")); - EXPECT_GT(0, strcasecmp("aB", "Ac")); - EXPECT_GT(0, strcasecmp("ABC", "ABCd")); - EXPECT_LT(0, strcasecmp("acc", "abc")); - EXPECT_LT(0, strcasecmp("ABCd", "abc")); - - // strncasecmp - EXPECT_EQ(0, strncasecmp("a", "b", 0)); - EXPECT_EQ(0, strncasecmp("abCD", "ABcd", 10)); - EXPECT_EQ(0, strncasecmp("abCd", "ABcef", 3)); - EXPECT_GT(0, strncasecmp("abcde", "ABCfa", 4)); - EXPECT_GT(0, strncasecmp("a", "B", 5)); - EXPECT_GT(0, strncasecmp("bc", "BCde", 4)); - EXPECT_LT(0, strncasecmp("xyz", "xyy", 10)); - EXPECT_LT(0, strncasecmp("Baa", "aaa", 1)); - EXPECT_LT(0, strncasecmp("zyx", "", 2)); - - // memcmp - EXPECT_EQ(0, memcmp("a", "b", 0)); - EXPECT_EQ(0, memcmp("ab\0c", "ab\0c", 4)); - EXPECT_GT(0, memcmp("\0ab", "\0ac", 3)); - EXPECT_GT(0, memcmp("abb\0", "abba", 4)); - EXPECT_LT(0, memcmp("ab\0cd", "ab\0c\0", 5)); - EXPECT_LT(0, memcmp("zza", "zyx", 3)); -} - -typedef int(*PointerToStrCmp)(const char*, const char*); -void RunStrCmpTest(PointerToStrCmp StrCmp) { - size_t size = Ident(100); - int fill = 'o'; - char *s1 = MallocAndMemsetString(size, fill); - char *s2 = MallocAndMemsetString(size, fill); - s1[size - 1] = '\0'; - s2[size - 1] = '\0'; - // Normal StrCmp calls - Ident(StrCmp(s1, s2)); - Ident(StrCmp(s1, s2 + size - 1)); - Ident(StrCmp(s1 + size - 1, s2 + size - 1)); - s1[size - 1] = 'z'; - s2[size - 1] = 'x'; - Ident(StrCmp(s1, s2)); - // One of arguments points to not allocated memory. - EXPECT_DEATH(Ident(StrCmp)(s1 - 1, s2), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(StrCmp)(s1, s2 - 1), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(StrCmp)(s1 + size, s2), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(StrCmp)(s1, s2 + size), RightOOBReadMessage(0)); - // Hit unallocated memory and die. 
- s1[size - 1] = fill; - EXPECT_DEATH(Ident(StrCmp)(s1, s1), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(StrCmp)(s1 + size - 1, s2), RightOOBReadMessage(0)); - free(s1); - free(s2); -} - -TEST(AddressSanitizer, StrCmpOOBTest) { - RunStrCmpTest(&strcmp); -} - -TEST(AddressSanitizer, StrCaseCmpOOBTest) { - RunStrCmpTest(&strcasecmp); -} - -typedef int(*PointerToStrNCmp)(const char*, const char*, size_t); -void RunStrNCmpTest(PointerToStrNCmp StrNCmp) { - size_t size = Ident(100); - char *s1 = MallocAndMemsetString(size); - char *s2 = MallocAndMemsetString(size); - s1[size - 1] = '\0'; - s2[size - 1] = '\0'; - // Normal StrNCmp calls - Ident(StrNCmp(s1, s2, size + 2)); - s1[size - 1] = 'z'; - s2[size - 1] = 'x'; - Ident(StrNCmp(s1 + size - 2, s2 + size - 2, size)); - s2[size - 1] = 'z'; - Ident(StrNCmp(s1 - 1, s2 - 1, 0)); - Ident(StrNCmp(s1 + size - 1, s2 + size - 1, 1)); - // One of arguments points to not allocated memory. - EXPECT_DEATH(Ident(StrNCmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(StrNCmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(StrNCmp)(s1 + size, s2, 1), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(StrNCmp)(s1, s2 + size, 1), RightOOBReadMessage(0)); - // Hit unallocated memory and die. - EXPECT_DEATH(Ident(StrNCmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(StrNCmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0)); - free(s1); - free(s2); -} - -TEST(AddressSanitizer, StrNCmpOOBTest) { - RunStrNCmpTest(&strncmp); -} - -TEST(AddressSanitizer, StrNCaseCmpOOBTest) { - RunStrNCmpTest(&strncasecmp); -} - -TEST(AddressSanitizer, MemCmpOOBTest) { - size_t size = Ident(100); - char *s1 = MallocAndMemsetString(size); - char *s2 = MallocAndMemsetString(size); - // Normal memcmp calls. - Ident(memcmp(s1, s2, size)); - Ident(memcmp(s1 + size - 1, s2 + size - 1, 1)); - Ident(memcmp(s1 - 1, s2 - 1, 0)); - // One of arguments points to not allocated memory. - EXPECT_DEATH(Ident(memcmp)(s1 - 1, s2, 1), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(memcmp)(s1, s2 - 1, 1), LeftOOBReadMessage(1)); - EXPECT_DEATH(Ident(memcmp)(s1 + size, s2, 1), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(memcmp)(s1, s2 + size, 1), RightOOBReadMessage(0)); - // Hit unallocated memory and die. - EXPECT_DEATH(Ident(memcmp)(s1 + 1, s2 + 1, size), RightOOBReadMessage(0)); - EXPECT_DEATH(Ident(memcmp)(s1 + size - 1, s2, 2), RightOOBReadMessage(0)); - // Zero bytes are not terminators and don't prevent from OOB. - s1[size - 1] = '\0'; - s2[size - 1] = '\0'; - EXPECT_DEATH(Ident(memcmp)(s1, s2, size + 1), RightOOBReadMessage(0)); - free(s1); - free(s2); -} - -TEST(AddressSanitizer, StrCatOOBTest) { - // strcat() reads strlen(to) bytes from |to| before concatenating. - size_t to_size = Ident(100); - char *to = MallocAndMemsetString(to_size); - to[0] = '\0'; - size_t from_size = Ident(20); - char *from = MallocAndMemsetString(from_size); - from[from_size - 1] = '\0'; - // Normal strcat calls. - strcat(to, from); - strcat(to, from); - strcat(to + from_size, from + from_size - 2); - // Passing an invalid pointer is an error even when concatenating an empty - // string. - EXPECT_DEATH(strcat(to - 1, from + from_size - 1), LeftOOBAccessMessage(1)); - // One of arguments points to not allocated memory. 
- EXPECT_DEATH(strcat(to - 1, from), LeftOOBAccessMessage(1)); - EXPECT_DEATH(strcat(to, from - 1), LeftOOBReadMessage(1)); - EXPECT_DEATH(strcat(to + to_size, from), RightOOBWriteMessage(0)); - EXPECT_DEATH(strcat(to, from + from_size), RightOOBReadMessage(0)); - - // "from" is not zero-terminated. - from[from_size - 1] = 'z'; - EXPECT_DEATH(strcat(to, from), RightOOBReadMessage(0)); - from[from_size - 1] = '\0'; - // "to" is not zero-terminated. - memset(to, 'z', to_size); - EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0)); - // "to" is too short to fit "from". - to[to_size - from_size + 1] = '\0'; - EXPECT_DEATH(strcat(to, from), RightOOBWriteMessage(0)); - // length of "to" is just enough. - strcat(to, from + 1); - - free(to); - free(from); -} - -TEST(AddressSanitizer, StrNCatOOBTest) { - // strncat() reads strlen(to) bytes from |to| before concatenating. - size_t to_size = Ident(100); - char *to = MallocAndMemsetString(to_size); - to[0] = '\0'; - size_t from_size = Ident(20); - char *from = MallocAndMemsetString(from_size); - // Normal strncat calls. - strncat(to, from, 0); - strncat(to, from, from_size); - from[from_size - 1] = '\0'; - strncat(to, from, 2 * from_size); - // Catenating empty string with an invalid string is still an error. - EXPECT_DEATH(strncat(to - 1, from, 0), LeftOOBAccessMessage(1)); - strncat(to, from + from_size - 1, 10); - // One of arguments points to not allocated memory. - EXPECT_DEATH(strncat(to - 1, from, 2), LeftOOBAccessMessage(1)); - EXPECT_DEATH(strncat(to, from - 1, 2), LeftOOBReadMessage(1)); - EXPECT_DEATH(strncat(to + to_size, from, 2), RightOOBWriteMessage(0)); - EXPECT_DEATH(strncat(to, from + from_size, 2), RightOOBReadMessage(0)); - - memset(from, 'z', from_size); - memset(to, 'z', to_size); - to[0] = '\0'; - // "from" is too short. - EXPECT_DEATH(strncat(to, from, from_size + 1), RightOOBReadMessage(0)); - // "to" is not zero-terminated. - EXPECT_DEATH(strncat(to + 1, from, 1), RightOOBWriteMessage(0)); - // "to" is too short to fit "from". - to[0] = 'z'; - to[to_size - from_size + 1] = '\0'; - EXPECT_DEATH(strncat(to, from, from_size - 1), RightOOBWriteMessage(0)); - // "to" is just enough. - strncat(to, from, from_size - 2); - - free(to); - free(from); -} - -static string OverlapErrorMessage(const string &func) { - return func + "-param-overlap"; -} - -TEST(AddressSanitizer, StrArgsOverlapTest) { - size_t size = Ident(100); - char *str = Ident((char*)malloc(size)); - -// Do not check memcpy() on OS X 10.7 and later, where it actually aliases -// memmove(). -#if !defined(__APPLE__) || !defined(MAC_OS_X_VERSION_10_7) || \ - (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7) - // Check "memcpy". Use Ident() to avoid inlining. - memset(str, 'z', size); - Ident(memcpy)(str + 1, str + 11, 10); - Ident(memcpy)(str, str, 0); - EXPECT_DEATH(Ident(memcpy)(str, str + 14, 15), OverlapErrorMessage("memcpy")); - EXPECT_DEATH(Ident(memcpy)(str + 14, str, 15), OverlapErrorMessage("memcpy")); -#endif - - // We do not treat memcpy with to==from as a bug. - // See http://llvm.org/bugs/show_bug.cgi?id=11763. - // EXPECT_DEATH(Ident(memcpy)(str + 20, str + 20, 1), - // OverlapErrorMessage("memcpy")); - - // Check "strcpy". - memset(str, 'z', size); - str[9] = '\0'; - strcpy(str + 10, str); - EXPECT_DEATH(strcpy(str + 9, str), OverlapErrorMessage("strcpy")); - EXPECT_DEATH(strcpy(str, str + 4), OverlapErrorMessage("strcpy")); - strcpy(str, str + 5); - - // Check "strncpy". 
- memset(str, 'z', size); - strncpy(str, str + 10, 10); - EXPECT_DEATH(strncpy(str, str + 9, 10), OverlapErrorMessage("strncpy")); - EXPECT_DEATH(strncpy(str + 9, str, 10), OverlapErrorMessage("strncpy")); - str[10] = '\0'; - strncpy(str + 11, str, 20); - EXPECT_DEATH(strncpy(str + 10, str, 20), OverlapErrorMessage("strncpy")); - - // Check "strcat". - memset(str, 'z', size); - str[10] = '\0'; - str[20] = '\0'; - strcat(str, str + 10); - EXPECT_DEATH(strcat(str, str + 11), OverlapErrorMessage("strcat")); - str[10] = '\0'; - strcat(str + 11, str); - EXPECT_DEATH(strcat(str, str + 9), OverlapErrorMessage("strcat")); - EXPECT_DEATH(strcat(str + 9, str), OverlapErrorMessage("strcat")); - EXPECT_DEATH(strcat(str + 10, str), OverlapErrorMessage("strcat")); - - // Check "strncat". - memset(str, 'z', size); - str[10] = '\0'; - strncat(str, str + 10, 10); // from is empty - EXPECT_DEATH(strncat(str, str + 11, 10), OverlapErrorMessage("strncat")); - str[10] = '\0'; - str[20] = '\0'; - strncat(str + 5, str, 5); - str[10] = '\0'; - EXPECT_DEATH(strncat(str + 5, str, 6), OverlapErrorMessage("strncat")); - EXPECT_DEATH(strncat(str, str + 9, 10), OverlapErrorMessage("strncat")); - - free(str); -} - -void CallAtoi(const char *nptr) { - Ident(atoi(nptr)); -} -void CallAtol(const char *nptr) { - Ident(atol(nptr)); -} -void CallAtoll(const char *nptr) { - Ident(atoll(nptr)); -} -typedef void(*PointerToCallAtoi)(const char*); - -void RunAtoiOOBTest(PointerToCallAtoi Atoi) { - char *array = MallocAndMemsetString(10, '1'); - // Invalid pointer to the string. - EXPECT_DEATH(Atoi(array + 11), RightOOBReadMessage(1)); - EXPECT_DEATH(Atoi(array - 1), LeftOOBReadMessage(1)); - // Die if a buffer doesn't have terminating NULL. - EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); - // Make last symbol a terminating NULL or other non-digit. - array[9] = '\0'; - Atoi(array); - array[9] = 'a'; - Atoi(array); - Atoi(array + 9); - // Sometimes we need to detect overflow if no digits are found. - memset(array, ' ', 10); - EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); - array[9] = '-'; - EXPECT_DEATH(Atoi(array), RightOOBReadMessage(0)); - EXPECT_DEATH(Atoi(array + 9), RightOOBReadMessage(0)); - array[8] = '-'; - Atoi(array); - free(array); -} - -TEST(AddressSanitizer, AtoiAndFriendsOOBTest) { - RunAtoiOOBTest(&CallAtoi); - RunAtoiOOBTest(&CallAtol); - RunAtoiOOBTest(&CallAtoll); -} - -void CallStrtol(const char *nptr, char **endptr, int base) { - Ident(strtol(nptr, endptr, base)); -} -void CallStrtoll(const char *nptr, char **endptr, int base) { - Ident(strtoll(nptr, endptr, base)); -} -typedef void(*PointerToCallStrtol)(const char*, char**, int); - -void RunStrtolOOBTest(PointerToCallStrtol Strtol) { - char *array = MallocAndMemsetString(3); - char *endptr = NULL; - array[0] = '1'; - array[1] = '2'; - array[2] = '3'; - // Invalid pointer to the string. - EXPECT_DEATH(Strtol(array + 3, NULL, 0), RightOOBReadMessage(0)); - EXPECT_DEATH(Strtol(array - 1, NULL, 0), LeftOOBReadMessage(1)); - // Buffer overflow if there is no terminating null (depends on base). - Strtol(array, &endptr, 3); - EXPECT_EQ(array + 2, endptr); - EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); - array[2] = 'z'; - Strtol(array, &endptr, 35); - EXPECT_EQ(array + 2, endptr); - EXPECT_DEATH(Strtol(array, NULL, 36), RightOOBReadMessage(0)); - // Add terminating zero to get rid of overflow. - array[2] = '\0'; - Strtol(array, NULL, 36); - // Don't check for overflow if base is invalid. 
- Strtol(array - 1, NULL, -1); - Strtol(array + 3, NULL, 1); - // Sometimes we need to detect overflow if no digits are found. - array[0] = array[1] = array[2] = ' '; - EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); - array[2] = '+'; - EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); - array[2] = '-'; - EXPECT_DEATH(Strtol(array, NULL, 0), RightOOBReadMessage(0)); - array[1] = '+'; - Strtol(array, NULL, 0); - array[1] = array[2] = 'z'; - Strtol(array, &endptr, 0); - EXPECT_EQ(array, endptr); - Strtol(array + 2, NULL, 0); - EXPECT_EQ(array, endptr); - free(array); -} - -TEST(AddressSanitizer, StrtollOOBTest) { - RunStrtolOOBTest(&CallStrtoll); -} -TEST(AddressSanitizer, StrtolOOBTest) { - RunStrtolOOBTest(&CallStrtol); -} - -// At the moment we instrument memcpy/memove/memset calls at compile time so we -// can't handle OOB error if these functions are called by pointer, see disabled -// MemIntrinsicCallByPointerTest below -typedef void*(*PointerToMemTransfer)(void*, const void*, size_t); -typedef void*(*PointerToMemSet)(void*, int, size_t); - -void CallMemSetByPointer(PointerToMemSet MemSet) { - size_t size = Ident(100); - char *array = Ident((char*)malloc(size)); - EXPECT_DEATH(MemSet(array, 0, 101), RightOOBWriteMessage(0)); - free(array); -} - -void CallMemTransferByPointer(PointerToMemTransfer MemTransfer) { - size_t size = Ident(100); - char *src = Ident((char*)malloc(size)); - char *dst = Ident((char*)malloc(size)); - EXPECT_DEATH(MemTransfer(dst, src, 101), RightOOBWriteMessage(0)); - free(src); - free(dst); -} -TEST(AddressSanitizer, DISABLED_MemIntrinsicCallByPointerTest) { - CallMemSetByPointer(&memset); - CallMemTransferByPointer(&memcpy); - CallMemTransferByPointer(&memmove); +char* MallocAndMemsetString(size_t size) { + return MallocAndMemsetString(size, 'z'); } #if defined(__linux__) && !defined(ANDROID) && !defined(__ANDROID__) @@ -1829,7 +889,11 @@ TEST(AddressSanitizer, ShadowGapTest) { #if SANITIZER_WORDSIZE == 32 char *addr = (char*)0x22000000; #else +# if defined(__powerpc64__) + char *addr = (char*)0x024000800000; +# else char *addr = (char*)0x0000100000080000; +# endif #endif EXPECT_DEATH(*addr = 1, "AddressSanitizer: SEGV on unknown"); } @@ -1853,11 +917,9 @@ TEST(AddressSanitizer, StrDupTest) { } // Currently we create and poison redzone at right of global variables. -char glob5[5]; static char static110[110]; const char ConstGlob[7] = {1, 2, 3, 4, 5, 6, 7}; static const char StaticConstGlob[3] = {9, 8, 7}; -extern int GlobalsTest(int x); TEST(AddressSanitizer, GlobalTest) { static char func_static15[15]; @@ -2038,9 +1100,10 @@ TEST(AddressSanitizer, AttributeNoAddressSafetyTest) { Ident(NoAddressSafety)(); } -// TODO(glider): Enable this test on Mac. // It doesn't work on Android, as calls to new/delete go through malloc/free. -#if !defined(__APPLE__) && !defined(ANDROID) && !defined(__ANDROID__) +// Neither it does on OS X, see +// https://code.google.com/p/address-sanitizer/issues/detail?id=131. 
+#if !defined(ANDROID) && !defined(__ANDROID__) && !defined(__APPLE__) static string MismatchStr(const string &str) { return string("AddressSanitizer: alloc-dealloc-mismatch \\(") + str; } @@ -2097,22 +1160,6 @@ TEST(AddressSanitizer, DISABLED_DemoUAFHigh) { uaf_test<U1>(kLargeMalloc, 0); } -TEST(AddressSanitizer, DISABLED_DemoOOBLeftLow) { - oob_test<U1>(10, -1); -} - -TEST(AddressSanitizer, DISABLED_DemoOOBLeftHigh) { - oob_test<U1>(kLargeMalloc, -1); -} - -TEST(AddressSanitizer, DISABLED_DemoOOBRightLow) { - oob_test<U1>(10, 10); -} - -TEST(AddressSanitizer, DISABLED_DemoOOBRightHigh) { - oob_test<U1>(kLargeMalloc, kLargeMalloc); -} - TEST(AddressSanitizer, DISABLED_DemoOOM) { size_t size = SANITIZER_WORDSIZE == 64 ? (size_t)(1ULL << 40) : (0xf0000000); printf("%p\n", malloc(size)); @@ -2160,223 +1207,6 @@ TEST(AddressSanitizer, BufferOverflowAfterManyFrees) { delete [] Ident(x); } -#ifdef __APPLE__ -#include "asan_mac_test.h" -TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree) { - EXPECT_DEATH( - CFAllocatorDefaultDoubleFree(NULL), - "attempting double-free"); -} - -void CFAllocator_DoubleFreeOnPthread() { - pthread_t child; - PTHREAD_CREATE(&child, NULL, CFAllocatorDefaultDoubleFree, NULL); - PTHREAD_JOIN(child, NULL); // Shouldn't be reached. -} - -TEST(AddressSanitizerMac, CFAllocatorDefaultDoubleFree_ChildPhread) { - EXPECT_DEATH(CFAllocator_DoubleFreeOnPthread(), "attempting double-free"); -} - -namespace { - -void *GLOB; - -void *CFAllocatorAllocateToGlob(void *unused) { - GLOB = CFAllocatorAllocate(NULL, 100, /*hint*/0); - return NULL; -} - -void *CFAllocatorDeallocateFromGlob(void *unused) { - char *p = (char*)GLOB; - p[100] = 'A'; // ASan should report an error here. - CFAllocatorDeallocate(NULL, GLOB); - return NULL; -} - -void CFAllocator_PassMemoryToAnotherThread() { - pthread_t th1, th2; - PTHREAD_CREATE(&th1, NULL, CFAllocatorAllocateToGlob, NULL); - PTHREAD_JOIN(th1, NULL); - PTHREAD_CREATE(&th2, NULL, CFAllocatorDeallocateFromGlob, NULL); - PTHREAD_JOIN(th2, NULL); -} - -TEST(AddressSanitizerMac, CFAllocator_PassMemoryToAnotherThread) { - EXPECT_DEATH(CFAllocator_PassMemoryToAnotherThread(), - "heap-buffer-overflow"); -} - -} // namespace - -// TODO(glider): figure out whether we still need these tests. Is it correct -// to intercept the non-default CFAllocators? -TEST(AddressSanitizerMac, DISABLED_CFAllocatorSystemDefaultDoubleFree) { - EXPECT_DEATH( - CFAllocatorSystemDefaultDoubleFree(), - "attempting double-free"); -} - -// We're intercepting malloc, so kCFAllocatorMalloc is routed to ASan. -TEST(AddressSanitizerMac, CFAllocatorMallocDoubleFree) { - EXPECT_DEATH(CFAllocatorMallocDoubleFree(), "attempting double-free"); -} - -TEST(AddressSanitizerMac, DISABLED_CFAllocatorMallocZoneDoubleFree) { - EXPECT_DEATH(CFAllocatorMallocZoneDoubleFree(), "attempting double-free"); -} - -// For libdispatch tests below we check that ASan got to the shadow byte -// legend, i.e. managed to print the thread stacks (this almost certainly -// means that the libdispatch task creation has been intercepted correctly). -TEST(AddressSanitizerMac, GCDDispatchAsync) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. - EXPECT_DEATH(TestGCDDispatchAsync(), "Shadow byte legend"); -} - -TEST(AddressSanitizerMac, GCDDispatchSync) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. 
- EXPECT_DEATH(TestGCDDispatchSync(), "Shadow byte legend"); -} - - -TEST(AddressSanitizerMac, GCDReuseWqthreadsAsync) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. - EXPECT_DEATH(TestGCDReuseWqthreadsAsync(), "Shadow byte legend"); -} - -TEST(AddressSanitizerMac, GCDReuseWqthreadsSync) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. - EXPECT_DEATH(TestGCDReuseWqthreadsSync(), "Shadow byte legend"); -} - -TEST(AddressSanitizerMac, GCDDispatchAfter) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. - EXPECT_DEATH(TestGCDDispatchAfter(), "Shadow byte legend"); -} - -TEST(AddressSanitizerMac, GCDSourceEvent) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. - EXPECT_DEATH(TestGCDSourceEvent(), "Shadow byte legend"); -} - -TEST(AddressSanitizerMac, GCDSourceCancel) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. - EXPECT_DEATH(TestGCDSourceCancel(), "Shadow byte legend"); -} - -TEST(AddressSanitizerMac, GCDGroupAsync) { - // Make sure the whole ASan report is printed, i.e. that we don't die - // on a CHECK. - EXPECT_DEATH(TestGCDGroupAsync(), "Shadow byte legend"); -} - -void *MallocIntrospectionLockWorker(void *_) { - const int kNumPointers = 100; - int i; - void *pointers[kNumPointers]; - for (i = 0; i < kNumPointers; i++) { - pointers[i] = malloc(i + 1); - } - for (i = 0; i < kNumPointers; i++) { - free(pointers[i]); - } - - return NULL; -} - -void *MallocIntrospectionLockForker(void *_) { - pid_t result = fork(); - if (result == -1) { - perror("fork"); - } - assert(result != -1); - if (result == 0) { - // Call malloc in the child process to make sure we won't deadlock. - void *ptr = malloc(42); - free(ptr); - exit(0); - } else { - // Return in the parent process. - return NULL; - } -} - -TEST(AddressSanitizerMac, MallocIntrospectionLock) { - // Incorrect implementation of force_lock and force_unlock in our malloc zone - // will cause forked processes to deadlock. - // TODO(glider): need to detect that none of the child processes deadlocked. - const int kNumWorkers = 5, kNumIterations = 100; - int i, iter; - for (iter = 0; iter < kNumIterations; iter++) { - pthread_t workers[kNumWorkers], forker; - for (i = 0; i < kNumWorkers; i++) { - PTHREAD_CREATE(&workers[i], 0, MallocIntrospectionLockWorker, 0); - } - PTHREAD_CREATE(&forker, 0, MallocIntrospectionLockForker, 0); - for (i = 0; i < kNumWorkers; i++) { - PTHREAD_JOIN(workers[i], 0); - } - PTHREAD_JOIN(forker, 0); - } -} - -void *TSDAllocWorker(void *test_key) { - if (test_key) { - void *mem = malloc(10); - pthread_setspecific(*(pthread_key_t*)test_key, mem); - } - return NULL; -} - -TEST(AddressSanitizerMac, DISABLED_TSDWorkqueueTest) { - pthread_t th; - pthread_key_t test_key; - pthread_key_create(&test_key, CallFreeOnWorkqueue); - PTHREAD_CREATE(&th, NULL, TSDAllocWorker, &test_key); - PTHREAD_JOIN(th, NULL); - pthread_key_delete(test_key); -} - -// Test that CFStringCreateCopy does not copy constant strings. -TEST(AddressSanitizerMac, CFStringCreateCopy) { - CFStringRef str = CFSTR("Hello world!\n"); - CFStringRef str2 = CFStringCreateCopy(0, str); - EXPECT_EQ(str, str2); -} - -TEST(AddressSanitizerMac, NSObjectOOB) { - // Make sure that our allocators are used for NSObjects. 
- EXPECT_DEATH(TestOOBNSObjects(), "heap-buffer-overflow"); -} - -// Make sure that correct pointer is passed to free() when deallocating a -// NSURL object. -// See http://code.google.com/p/address-sanitizer/issues/detail?id=70. -TEST(AddressSanitizerMac, NSURLDeallocation) { - TestNSURLDeallocation(); -} - -// See http://code.google.com/p/address-sanitizer/issues/detail?id=109. -TEST(AddressSanitizerMac, Mstats) { - malloc_statistics_t stats1, stats2; - malloc_zone_statistics(/*all zones*/NULL, &stats1); - const size_t kMallocSize = 100000; - void *alloc = Ident(malloc(kMallocSize)); - malloc_zone_statistics(/*all zones*/NULL, &stats2); - EXPECT_GT(stats2.blocks_in_use, stats1.blocks_in_use); - EXPECT_GE(stats2.size_in_use - stats1.size_in_use, kMallocSize); - free(alloc); - // Even the default OSX allocator may not change the stats after free(). -} -#endif // __APPLE__ // Test that instrumentation of stack allocations takes into account // AllocSize of a type, and not its StoreSize (16 vs 10 bytes for long double). @@ -2387,3 +1217,16 @@ TEST(AddressSanitizer, LongDoubleNegativeTest) { memcpy(Ident(&a), Ident(&b), sizeof(long double)); memcpy(Ident(&c), Ident(&b), sizeof(long double)); } + +TEST(AddressSanitizer, pthread_getschedparam) { + int policy; + struct sched_param param; + EXPECT_DEATH( + pthread_getschedparam(pthread_self(), &policy, Ident(&param) + 2), + "AddressSanitizer: stack-buffer-overflow"); + EXPECT_DEATH( + pthread_getschedparam(pthread_self(), Ident(&policy) - 1, &param), + "AddressSanitizer: stack-buffer-overflow"); + int res = pthread_getschedparam(pthread_self(), &policy, &param); + ASSERT_EQ(0, res); +} diff --git a/lib/asan/tests/asan_test_config.h b/lib/asan/tests/asan_test_config.h index 1d28e99a4b10..6eb33ce4431b 100644 --- a/lib/asan/tests/asan_test_config.h +++ b/lib/asan/tests/asan_test_config.h @@ -47,10 +47,6 @@ using std::map; # error "please define ASAN_NEEDS_SEGV" #endif -#ifndef ASAN_LOW_MEMORY -# define ASAN_LOW_MEMORY 0 -#endif - #ifndef ASAN_AVOID_EXPENSIVE_TESTS # define ASAN_AVOID_EXPENSIVE_TESTS 0 #endif diff --git a/lib/asan/tests/asan_test_utils.h b/lib/asan/tests/asan_test_utils.h index 6ed9f90df906..403773180c2f 100644 --- a/lib/asan/tests/asan_test_utils.h +++ b/lib/asan/tests/asan_test_utils.h @@ -20,10 +20,92 @@ # undef INCLUDED_FROM_ASAN_TEST_UTILS_H #endif -#include "sanitizer_common/tests/sanitizer_test_utils.h" +#include "sanitizer_test_utils.h" +#include <stdio.h> +#include <signal.h> +#include <stdlib.h> +#include <string.h> +#include <strings.h> +#include <pthread.h> +#include <stdint.h> +#include <setjmp.h> +#include <assert.h> +#include <algorithm> +#include <sys/mman.h> + +#ifdef __linux__ +# include <sys/prctl.h> +# include <sys/types.h> +# include <sys/stat.h> +# include <fcntl.h> +#include <unistd.h> +#endif + +#if defined(__i386__) || defined(__x86_64__) +#include <emmintrin.h> +#endif + +#ifndef __APPLE__ +#include <malloc.h> +#endif // Check that pthread_create/pthread_join return success.
#define PTHREAD_CREATE(a, b, c, d) ASSERT_EQ(0, pthread_create(a, b, c, d)) #define PTHREAD_JOIN(a, b) ASSERT_EQ(0, pthread_join(a, b)) +#if ASAN_HAS_EXCEPTIONS +# define ASAN_THROW(x) throw (x) +#else +# define ASAN_THROW(x) +#endif + +typedef uint8_t U1; +typedef uint16_t U2; +typedef uint32_t U4; +typedef uint64_t U8; + +static const int kPageSize = 4096; + +const size_t kLargeMalloc = 1 << 24; + +extern void free_aaa(void *p); +extern void *malloc_aaa(size_t size); + +template<typename T> +NOINLINE void asan_write(T *a) { + *a = 0; +} + +string RightOOBErrorMessage(int oob_distance, bool is_write); +string RightOOBWriteMessage(int oob_distance); +string RightOOBReadMessage(int oob_distance); +string LeftOOBErrorMessage(int oob_distance, bool is_write); +string LeftOOBWriteMessage(int oob_distance); +string LeftOOBReadMessage(int oob_distance); +string LeftOOBAccessMessage(int oob_distance); +char* MallocAndMemsetString(size_t size, char ch); +char* MallocAndMemsetString(size_t size); + +extern char glob1[1]; +extern char glob2[2]; +extern char glob3[3]; +extern char glob4[4]; +extern char glob5[5]; +extern char glob6[6]; +extern char glob7[7]; +extern char glob8[8]; +extern char glob9[9]; +extern char glob10[10]; +extern char glob11[11]; +extern char glob12[12]; +extern char glob13[13]; +extern char glob14[14]; +extern char glob15[15]; +extern char glob16[16]; +extern char glob17[17]; +extern char glob1000[1000]; +extern char glob10000[10000]; +extern char glob100000[100000]; +extern int GlobalsTest(int x); + #endif // ASAN_TEST_UTILS_H diff --git a/lib/atomic.c b/lib/atomic.c index a291f0d051d1..02429a653d2b 100644 --- a/lib/atomic.c +++ b/lib/atomic.c @@ -30,10 +30,18 @@ // Clang objects if you redefine a builtin. This little hack allows us to // define a function with the same name as an intrinsic. +#if __APPLE__ +// mach-o has extra leading underscore +#pragma redefine_extname __atomic_load_c ___atomic_load +#pragma redefine_extname __atomic_store_c ___atomic_store +#pragma redefine_extname __atomic_exchange_c ___atomic_exchange +#pragma redefine_extname __atomic_compare_exchange_c ___atomic_compare_exchange +#else #pragma redefine_extname __atomic_load_c __atomic_load #pragma redefine_extname __atomic_store_c __atomic_store #pragma redefine_extname __atomic_exchange_c __atomic_exchange #pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange +#endif /// Number of locks. This allocates one page on 32-bit platforms, two on /// 64-bit. This can be specified externally if a different trade between @@ -70,6 +78,20 @@ inline static void lock(Lock *l) { } /// locks for atomic operations static Lock locks[SPINLOCK_COUNT] = { [0 ... SPINLOCK_COUNT-1] = {0,1,0} }; + +#elif defined(__APPLE__) +#include <libkern/OSAtomic.h> +typedef OSSpinLock Lock; +inline static void unlock(Lock *l) { + OSSpinLockUnlock(l); +} +/// Locks a lock. In the current implementation, this is potentially +/// unbounded in the contended case. +inline static void lock(Lock *l) { + OSSpinLockLock(l); +} +static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0 + #else typedef _Atomic(uintptr_t) Lock; /// Unlock a lock. This is a release operation. 
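The lib/atomic.c hunk above adds the Mach-O symbol renaming and a Darwin lock table (OSSpinLock) for the generic __atomic_* fallbacks. To illustrate how such a fallback behaves when an object is too wide for a native atomic instruction, here is a minimal self-contained sketch: the object's address is hashed to pick one of a fixed number of locks, the lock is held while the bytes are copied, and then released. The lock count, the shift-and-mask hash, and the name atomic_load_sketch are assumptions made for this illustration, not code from the patch.

#include <atomic>
#include <cstdint>
#include <cstring>

// Assumed table size; the real file sizes SPINLOCK_COUNT to roughly one page.
static const int kSpinlockCount = 1 << 10;

// One word-sized spinlock per slot. Static storage, so the table starts out
// zeroed (all unlocked). On Apple the patch uses OSSpinLock instead.
static std::atomic<uintptr_t> locks[kSpinlockCount];

static void lock(std::atomic<uintptr_t> *l) {
  // Spin until we observe the lock free and manage to take it.
  while (l->exchange(1, std::memory_order_acquire) != 0) {
  }
}

static void unlock(std::atomic<uintptr_t> *l) {
  l->store(0, std::memory_order_release);
}

// Hash the object's address so that unrelated objects rarely share a lock.
static std::atomic<uintptr_t> *lock_for_pointer(const void *ptr) {
  uintptr_t hash = reinterpret_cast<uintptr_t>(ptr) >> 4;
  return &locks[hash & (kSpinlockCount - 1)];
}

// Lock-protected load: the shape of the fallback used when no native atomic
// of the requested size exists.
void atomic_load_sketch(int size, const void *src, void *dest) {
  std::atomic<uintptr_t> *l = lock_for_pointer(src);
  lock(l);
  std::memcpy(dest, src, size);
  unlock(l);
}

The #pragma redefine_extname lines in the hunk only change which symbol name such a fallback is exported under; Mach-O prepends an underscore to every C symbol, hence the extra leading underscore in the Apple branch.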
diff --git a/lib/comparedf2.c b/lib/comparedf2.c index fe35fd80aadd..de67784dc85d 100644 --- a/lib/comparedf2.c +++ b/lib/comparedf2.c @@ -106,6 +106,8 @@ enum GE_RESULT __gedf2(fp_t a, fp_t b) { } } +ARM_EABI_FNALIAS(dcmpun, unorddf2) + int __unorddf2(fp_t a, fp_t b) { const rep_t aAbs = toRep(a) & absMask; const rep_t bAbs = toRep(b) & absMask; diff --git a/lib/comparesf2.c b/lib/comparesf2.c index 3f2e358addb5..c1c3a479c8ec 100644 --- a/lib/comparesf2.c +++ b/lib/comparesf2.c @@ -106,6 +106,8 @@ enum GE_RESULT __gesf2(fp_t a, fp_t b) { } } +ARM_EABI_FNALIAS(fcmpun, unordsf2) + int __unordsf2(fp_t a, fp_t b) { const rep_t aAbs = toRep(a) & absMask; const rep_t bAbs = toRep(b) & absMask; diff --git a/lib/interception/CMakeLists.txt b/lib/interception/CMakeLists.txt index ca59f2b8211e..cd9e6e75504f 100644 --- a/lib/interception/CMakeLists.txt +++ b/lib/interception/CMakeLists.txt @@ -4,28 +4,19 @@ set(INTERCEPTION_SOURCES interception_linux.cc interception_mac.cc interception_win.cc + interception_type_test.cc ) -set(MACH_OVERRIDE_SOURCES - mach_override/mach_override.c - ) - -# Only add this C file if we're building on a Mac. Other source files can be -# harmlessly compiled on any platform, but the C file is complained about due -# to pedantic rules about empty translation units. -if (APPLE) - list(APPEND INTERCEPTION_SOURCES ${MACH_OVERRIDE_SOURCES}) - set_source_files_properties(${MACH_OVERRIDE_SOURCES} PROPERTIES COMPILE_FLAGS "-std=c99 ${INTERCEPTION_CFLAGS}") -endif () +include_directories(..) set(INTERCEPTION_CFLAGS ${SANITIZER_COMMON_CFLAGS}) if(APPLE) # Build universal binary on APPLE. - add_library(RTInterception.osx OBJECT ${INTERCEPTION_SOURCES}) - set_target_compile_flags(RTInterception.osx ${INTERCEPTION_CFLAGS}) - set_target_properties(RTInterception.osx PROPERTIES - OSX_ARCHITECTURES "${SANITIZER_COMMON_SUPPORTED_ARCH}") + add_compiler_rt_osx_object_library(RTInterception + ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH} + SOURCES ${INTERCEPTION_SOURCES} + CFLAGS ${INTERCEPTION_CFLAGS}) elseif(ANDROID) add_library(RTInterception.arm.android OBJECT ${INTERCEPTION_SOURCES}) set_target_compile_flags(RTInterception.arm.android diff --git a/lib/interception/Makefile.mk b/lib/interception/Makefile.mk index 1412a016f80e..88aa6cbc26d1 100644 --- a/lib/interception/Makefile.mk +++ b/lib/interception/Makefile.mk @@ -8,7 +8,7 @@ #===------------------------------------------------------------------------===# ModuleName := interception -SubDirs := mach_override +SubDirs := Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file))) ObjNames := $(Sources:%.cc=%.o) @@ -17,7 +17,7 @@ Implementation := Generic # FIXME: use automatic dependencies? Dependencies := $(wildcard $(Dir)/*.h) -Dependencies += $(wildcard $(Dir)/mach_override/*.h) +Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h) # Define a convenience variable for all the interception functions. InterceptionFunctions := $(Sources:%.cc=%) diff --git a/lib/interception/interception.h b/lib/interception/interception.h index 030bda7cba0c..d50af35415d6 100644 --- a/lib/interception/interception.h +++ b/lib/interception/interception.h @@ -19,35 +19,24 @@ # error "Interception doesn't work on this operating system." #endif -#include "sanitizer/common_interface_defs.h" +#include "sanitizer_common/sanitizer_internal_defs.h" // These typedefs should be used only in the interceptor definitions to replace // the standard system types (e.g. 
SSIZE_T instead of ssize_t) -typedef __sanitizer::uptr SIZE_T; -typedef __sanitizer::sptr SSIZE_T; -typedef __sanitizer::sptr PTRDIFF_T; -typedef __sanitizer::s64 INTMAX_T; -typedef __sanitizer::u64 OFF_T; -typedef __sanitizer::u64 OFF64_T; - -// How to use this library: -// 1) Include this header to define your own interceptors -// (see details below). -// 2) Build all *.cc files and link against them. -// On Mac you will also need to: -// 3) Provide your own implementation for the following functions: -// mach_error_t __interception::allocate_island(void **ptr, -// size_t size, -// void *hint); -// mach_error_t __interception::deallocate_island(void *ptr); -// See "interception_mac.h" for more details. +typedef __sanitizer::uptr SIZE_T; +typedef __sanitizer::sptr SSIZE_T; +typedef __sanitizer::sptr PTRDIFF_T; +typedef __sanitizer::s64 INTMAX_T; +typedef __sanitizer::OFF_T OFF_T; +typedef __sanitizer::OFF64_T OFF64_T; // How to add an interceptor: // Suppose you need to wrap/replace system function (generally, from libc): // int foo(const char *bar, double baz); // You'll need to: // 1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in -// your source file. +// your source file. See the notes below for cases when +// INTERCEPTOR_WITH_SUFFIX(...) should be used instead. // 2) Call "INTERCEPT_FUNCTION(foo)" prior to the first call of "foo". // INTERCEPT_FUNCTION(foo) evaluates to "true" iff the function was // intercepted successfully. @@ -61,15 +50,20 @@ typedef __sanitizer::u64 OFF64_T; // 3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double) // to a header file. -// Notes: 1. Things may not work properly if macro INTERCEPT(...) {...} or +// Notes: 1. Things may not work properly if macro INTERCEPTOR(...) {...} or // DECLARE_REAL(...) are located inside namespaces. -// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo);" to +// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo)" to // effectively redirect calls from "foo" to "zoo". In this case // you aren't required to implement // INTERCEPTOR(int, foo, const char *bar, double baz) {...} // but instead you'll have to add -// DEFINE_REAL(int, foo, const char *bar, double baz) in your +// DECLARE_REAL(int, foo, const char *bar, double baz) in your // source file (to define a pointer to overriden function). +// 3. Some Mac functions have symbol variants discriminated by +// additional suffixes, e.g. _$UNIX2003 (see +// https://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/index.html +// for more details). To intercept such functions you need to use the +// INTERCEPTOR_WITH_SUFFIX(...) macro. // How it works: // To replace system functions on Linux we just need to declare functions @@ -79,29 +73,53 @@ typedef __sanitizer::u64 OFF64_T; // we intercept. To resolve this we declare our interceptors with __interceptor_ // prefix, and then make actual interceptors weak aliases to __interceptor_ // functions. +// // This is not so on Mac OS, where the two-level namespace makes // our replacement functions invisible to other libraries. This may be overcomed // using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared -// libraries in Chromium were noticed when doing so. Instead we use -// mach_override, a handy framework for patching functions at runtime. -// To avoid possible name clashes, our replacement functions have -// the "wrap_" prefix on Mac. 
-// An alternative to function patching is to create a dylib containing a -// __DATA,__interpose section that associates library functions with their -// wrappers. When this dylib is preloaded before an executable using -// DYLD_INSERT_LIBRARIES, it routes all the calls to interposed functions done -// through stubs to the wrapper functions. Such a library is built with -// -DMAC_INTERPOSE_FUNCTIONS=1. - -#if !defined(MAC_INTERPOSE_FUNCTIONS) || !defined(__APPLE__) -# define MAC_INTERPOSE_FUNCTIONS 0 -#endif +// libraries in Chromium were noticed when doing so. +// Instead we create a dylib containing a __DATA,__interpose section that +// associates library functions with their wrappers. When this dylib is +// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all +// the calls to interposed functions done through stubs to the wrapper +// functions. +// As it's decided at compile time which functions are to be intercepted on Mac, +// INTERCEPT_FUNCTION() is effectively a no-op on this system. #if defined(__APPLE__) +#include <sys/cdefs.h> // For __DARWIN_ALIAS_C(). + +// Just a pair of pointers. +struct interpose_substitution { + const uptr replacement; + const uptr original; +}; + +// For a function foo() create a global pair of pointers { wrap_foo, foo } in +// the __DATA,__interpose section. +// As a result all the calls to foo() will be routed to wrap_foo() at runtime. +#define INTERPOSER(func_name) __attribute__((used)) \ +const interpose_substitution substitution_##func_name[] \ + __attribute__((section("__DATA, __interpose"))) = { \ + { reinterpret_cast<const uptr>(WRAP(func_name)), \ + reinterpret_cast<const uptr>(func_name) } \ +} + +// For a function foo() and a wrapper function bar() create a global pair +// of pointers { bar, foo } in the __DATA,__interpose section. +// As a result all the calls to foo() will be routed to bar() at runtime. +#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \ +const interpose_substitution substitution_##func_name[] \ + __attribute__((section("__DATA, __interpose"))) = { \ + { reinterpret_cast<const uptr>(wrapper_name), \ + reinterpret_cast<const uptr>(func_name) } \ +} + # define WRAP(x) wrap_##x # define WRAPPER_NAME(x) "wrap_"#x # define INTERCEPTOR_ATTRIBUTE # define DECLARE_WRAPPER(ret_type, func, ...) + #elif defined(_WIN32) # if defined(_DLL) // DLL CRT # define WRAP(x) x @@ -122,7 +140,7 @@ typedef __sanitizer::u64 OFF64_T; __attribute__((weak, alias("__interceptor_" #func), visibility("default"))); #endif -#if !MAC_INTERPOSE_FUNCTIONS +#if !defined(__APPLE__) # define PTR_TO_REAL(x) real_##x # define REAL(x) __interception::PTR_TO_REAL(x) # define FUNC_TYPE(x) x##_f @@ -132,11 +150,11 @@ typedef __sanitizer::u64 OFF64_T; namespace __interception { \ extern FUNC_TYPE(func) PTR_TO_REAL(func); \ } -#else // MAC_INTERPOSE_FUNCTIONS +#else // __APPLE__ # define REAL(x) x # define DECLARE_REAL(ret_type, func, ...) \ extern "C" ret_type func(__VA_ARGS__); -#endif // MAC_INTERPOSE_FUNCTIONS +#endif // __APPLE__ #define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \ DECLARE_REAL(ret_type, func, __VA_ARGS__) \ @@ -146,7 +164,7 @@ typedef __sanitizer::u64 OFF64_T; // macros does its job. In exceptional cases you may need to call REAL(foo) // without defining INTERCEPTOR(..., foo, ...). For example, if you override // foo with an interceptor for other function. -#if !MAC_INTERPOSE_FUNCTIONS +#if !defined(__APPLE__) # define DEFINE_REAL(ret_type, func, ...) 
\ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \ namespace __interception { \ @@ -156,6 +174,7 @@ typedef __sanitizer::u64 OFF64_T; # define DEFINE_REAL(ret_type, func, ...) #endif +#if !defined(__APPLE__) #define INTERCEPTOR(ret_type, func, ...) \ DEFINE_REAL(ret_type, func, __VA_ARGS__) \ DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \ @@ -163,6 +182,29 @@ typedef __sanitizer::u64 OFF64_T; INTERCEPTOR_ATTRIBUTE \ ret_type WRAP(func)(__VA_ARGS__) +// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now. +#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \ + INTERCEPTOR(ret_type, func, __VA_ARGS__) + +#else // __APPLE__ + +#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \ + extern "C" ret_type func(__VA_ARGS__) suffix; \ + extern "C" ret_type WRAP(func)(__VA_ARGS__); \ + INTERPOSER(func); \ + extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__) + +#define INTERCEPTOR(ret_type, func, ...) \ + INTERCEPTOR_ZZZ(/*no symbol variants*/, ret_type, func, __VA_ARGS__) + +#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \ + INTERCEPTOR_ZZZ(__DARWIN_ALIAS_C(func), ret_type, func, __VA_ARGS__) + +// Override |overridee| with |overrider|. +#define OVERRIDE_FUNCTION(overridee, overrider) \ + INTERPOSER_2(overridee, WRAP(overrider)) +#endif + #if defined(_WIN32) # define INTERCEPTOR_WINAPI(ret_type, func, ...) \ typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \ @@ -195,8 +237,6 @@ typedef unsigned long uptr; // NOLINT # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX(func) #elif defined(__APPLE__) # include "interception_mac.h" -# define OVERRIDE_FUNCTION(old_func, new_func) \ - OVERRIDE_FUNCTION_MAC(old_func, new_func) # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func) #else // defined(_WIN32) # include "interception_win.h" diff --git a/lib/interception/interception_mac.cc b/lib/interception/interception_mac.cc index 2c10a71210e9..b035cf998140 100644 --- a/lib/interception/interception_mac.cc +++ b/lib/interception/interception_mac.cc @@ -15,17 +15,6 @@ #ifdef __APPLE__ #include "interception.h" -#include "mach_override/mach_override.h" -namespace __interception { -bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) { - *orig_old_func = 0; - int res = __asan_mach_override_ptr_custom((void*)old_func, (void*)new_func, - (void**)orig_old_func, - __interception_allocate_island, - __interception_deallocate_island); - return (res == 0) && (*orig_old_func != 0); -} -} // namespace __interception #endif // __APPLE__ diff --git a/lib/interception/interception_mac.h b/lib/interception/interception_mac.h index 6e9e80817cb3..5059489831ec 100644 --- a/lib/interception/interception_mac.h +++ b/lib/interception/interception_mac.h @@ -21,29 +21,7 @@ #ifndef INTERCEPTION_MAC_H #define INTERCEPTION_MAC_H -#include <mach/mach_error.h> -#include <stddef.h> - -// Allocate memory for the escape island. This cannot be moved to -// mach_override, because each user of interceptors may specify its -// own memory range for escape islands. -extern "C" { -mach_error_t __interception_allocate_island(void **ptr, size_t unused_size, - void *unused_hint); -mach_error_t __interception_deallocate_island(void *ptr); -} // extern "C" - -namespace __interception { -// returns true if the old function existed. 
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func); -} // namespace __interception - -# define OVERRIDE_FUNCTION_MAC(old_func, new_func) \ - ::__interception::OverrideFunction( \ - (::__interception::uptr)old_func, \ - (::__interception::uptr)new_func, \ - (::__interception::uptr*)((::__interception::uptr)&REAL(old_func))) -# define INTERCEPT_FUNCTION_MAC(func) OVERRIDE_FUNCTION_MAC(func, WRAP(func)) +#define INTERCEPT_FUNCTION_MAC(func) #endif // INTERCEPTION_MAC_H #endif // __APPLE__ diff --git a/lib/interception/interception_type_test.cc b/lib/interception/interception_type_test.cc new file mode 100644 index 000000000000..7b79b783fbe9 --- /dev/null +++ b/lib/interception/interception_type_test.cc @@ -0,0 +1,39 @@ +//===-- interception_type_test.cc -------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Compile-time tests of the internal type definitions. +//===----------------------------------------------------------------------===// + +#if defined(__linux__) || defined(__APPLE__) + +#include "interception.h" +#include <sys/types.h> +#include <stddef.h> +#include <stdint.h> + +COMPILER_CHECK(sizeof(SIZE_T) == sizeof(size_t)); +COMPILER_CHECK(sizeof(SSIZE_T) == sizeof(ssize_t)); +COMPILER_CHECK(sizeof(PTRDIFF_T) == sizeof(ptrdiff_t)); +COMPILER_CHECK(sizeof(INTMAX_T) == sizeof(intmax_t)); + +#ifndef __APPLE__ +COMPILER_CHECK(sizeof(OFF64_T) == sizeof(off64_t)); +#endif + +// The following are the cases when pread (and friends) is used instead of +// pread64. In those cases we need OFF_T to match off_t. We don't care about the +// rest (they depend on _FILE_OFFSET_BITS setting when building an application). +# if defined(__ANDROID__) || !defined _FILE_OFFSET_BITS || \ + _FILE_OFFSET_BITS != 64 +COMPILER_CHECK(sizeof(OFF_T) == sizeof(off_t)); +# endif + +#endif diff --git a/lib/interception/mach_override/LICENSE.TXT b/lib/interception/mach_override/LICENSE.TXT deleted file mode 100644 index 9446965176ce..000000000000 --- a/lib/interception/mach_override/LICENSE.TXT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright (c) 2003-2009 Jonathan 'Wolf' Rentzsch: <http://rentzsch.com> -Some rights reserved: <http://opensource.org/licenses/mit-license.php> - diff --git a/lib/interception/mach_override/README.txt b/lib/interception/mach_override/README.txt deleted file mode 100644 index 5f62ad7b994f..000000000000 --- a/lib/interception/mach_override/README.txt +++ /dev/null @@ -1,9 +0,0 @@ --- mach_override.c is taken from upstream version at - https://github.com/rentzsch/mach_star/tree/f8e0c424b5be5cb641ded67c265e616157ae4bcf --- Added debugging code under DEBUG_DISASM. --- The files are guarded with #ifdef __APPLE__ --- some opcodes are added in order to parse the library functions on Lion --- fixupInstructions() is extended to relocate relative calls, not only jumps --- mach_override_ptr is renamed to __asan_mach_override_ptr and - other functions are marked as hidden. 
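With mach_override removed, Darwin interception relies entirely on the __DATA,__interpose mechanism set up in interception.h above: the set of intercepted functions is fixed at compile time, and dyld rewires the calls when the dylib is loaded through DYLD_INSERT_LIBRARIES. As a rough, hand-expanded illustration of what the INTERPOSER/INTERCEPTOR pattern produces for one function, consider the sketch below. The choice of strlen and the trivial wrapper body are hypothetical; a real sanitizer interceptor would check the memory range and then defer to the original function.

#include <stdint.h>
#include <string.h>

typedef uintptr_t uptr;

// Same shape as the interpose_substitution struct declared in interception.h.
struct interpose_substitution {
  const uptr replacement;
  const uptr original;
};

extern "C" size_t wrap_strlen(const char *s);

// A { wrapper, original } pair placed in the __DATA,__interpose section;
// dyld reads it at load time and routes calls to strlen() into wrap_strlen().
__attribute__((used))
static const interpose_substitution substitution_strlen[]
    __attribute__((section("__DATA, __interpose"))) = {
  { reinterpret_cast<const uptr>(wrap_strlen),
    reinterpret_cast<const uptr>(strlen) }
};

extern "C" size_t wrap_strlen(const char *s) {
  // Illustrative body only: count the bytes ourselves. An ASan interceptor
  // would poison-check the range and then call the real strlen.
  size_t len = 0;
  while (s[len] != '\0') ++len;
  return len;
}

This also explains why INTERCEPT_FUNCTION_MAC(func) above can be left empty: the routing is established at load time by dyld rather than patched in at runtime.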
- diff --git a/lib/interception/mach_override/mach_override.c b/lib/interception/mach_override/mach_override.c deleted file mode 100644 index 7511a7bebb82..000000000000 --- a/lib/interception/mach_override/mach_override.c +++ /dev/null @@ -1,970 +0,0 @@ -/******************************************************************************* - mach_override.c - Copyright (c) 2003-2009 Jonathan 'Wolf' Rentzsch: <http://rentzsch.com> - Some rights reserved: <http://opensource.org/licenses/mit-license.php> - - ***************************************************************************/ -#ifdef __APPLE__ - -#include "mach_override.h" - -#include <mach-o/dyld.h> -#include <mach/mach_host.h> -#include <mach/mach_init.h> -#include <mach/vm_map.h> -#include <sys/mman.h> - -#include <CoreServices/CoreServices.h> - -//#define DEBUG_DISASM 1 -#undef DEBUG_DISASM - -/************************** -* -* Constants -* -**************************/ -#pragma mark - -#pragma mark (Constants) - -#if defined(__ppc__) || defined(__POWERPC__) - -static -long kIslandTemplate[] = { - 0x9001FFFC, // stw r0,-4(SP) - 0x3C00DEAD, // lis r0,0xDEAD - 0x6000BEEF, // ori r0,r0,0xBEEF - 0x7C0903A6, // mtctr r0 - 0x8001FFFC, // lwz r0,-4(SP) - 0x60000000, // nop ; optionally replaced - 0x4E800420 // bctr -}; - -#define kAddressHi 3 -#define kAddressLo 5 -#define kInstructionHi 10 -#define kInstructionLo 11 - -#elif defined(__i386__) - -#define kOriginalInstructionsSize 16 - -static -unsigned char kIslandTemplate[] = { - // kOriginalInstructionsSize nop instructions so that we - // should have enough space to host original instructions - 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, - 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, - // Now the real jump instruction - 0xE9, 0xEF, 0xBE, 0xAD, 0xDE -}; - -#define kInstructions 0 -#define kJumpAddress kInstructions + kOriginalInstructionsSize + 1 -#elif defined(__x86_64__) - -#define kOriginalInstructionsSize 32 - -#define kJumpAddress kOriginalInstructionsSize + 6 - -static -unsigned char kIslandTemplate[] = { - // kOriginalInstructionsSize nop instructions so that we - // should have enough space to host original instructions - 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, - 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, - 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, - 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, - // Now the real jump instruction - 0xFF, 0x25, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00 -}; - -#endif - -#define kAllocateHigh 1 -#define kAllocateNormal 0 - -/************************** -* -* Data Types -* -**************************/ -#pragma mark - -#pragma mark (Data Types) - -typedef struct { - char instructions[sizeof(kIslandTemplate)]; - int allocatedHigh; -} BranchIsland; - -/************************** -* -* Funky Protos -* -**************************/ -#pragma mark - -#pragma mark (Funky Protos) - - - static mach_error_t -allocateBranchIsland( - BranchIsland **island, - int allocateHigh, - void *originalFunctionAddress); - - static mach_error_t -freeBranchIsland( - BranchIsland *island ); - - static mach_error_t -defaultIslandMalloc( - void **ptr, size_t unused_size, void *hint); - - static mach_error_t -defaultIslandFree( - void *ptr); - -#if defined(__ppc__) || defined(__POWERPC__) - static mach_error_t -setBranchIslandTarget( - BranchIsland *island, - const void *branchTo, - long instruction ); -#endif - -#if defined(__i386__) || defined(__x86_64__) -static mach_error_t -setBranchIslandTarget_i386( - BranchIsland 
*island, - const void *branchTo, - char* instructions ); -// Can't be made static because there's no C implementation for atomic_mov64 -// on i386. -void -atomic_mov64( - uint64_t *targetAddress, - uint64_t value ) __attribute__((visibility("hidden"))); - - static Boolean -eatKnownInstructions( - unsigned char *code, - uint64_t *newInstruction, - int *howManyEaten, - char *originalInstructions, - int *originalInstructionCount, - uint8_t *originalInstructionSizes ); - - static void -fixupInstructions( - void *originalFunction, - void *escapeIsland, - void *instructionsToFix, - int instructionCount, - uint8_t *instructionSizes ); - -#ifdef DEBUG_DISASM - static void -dump16Bytes( - void *ptr); -#endif // DEBUG_DISASM -#endif - -/******************************************************************************* -* -* Interface -* -*******************************************************************************/ -#pragma mark - -#pragma mark (Interface) - -#if defined(__i386__) || defined(__x86_64__) -static mach_error_t makeIslandExecutable(void *address) { - mach_error_t err = err_none; - vm_size_t pageSize; - host_page_size( mach_host_self(), &pageSize ); - uintptr_t page = (uintptr_t)address & ~(uintptr_t)(pageSize-1); - int e = err_none; - e |= mprotect((void *)page, pageSize, PROT_EXEC | PROT_READ | PROT_WRITE); - e |= msync((void *)page, pageSize, MS_INVALIDATE ); - if (e) { - err = err_cannot_override; - } - return err; -} -#endif - - static mach_error_t -defaultIslandMalloc( - void **ptr, size_t unused_size, void *hint) { - return allocateBranchIsland( (BranchIsland**)ptr, kAllocateHigh, hint ); -} - static mach_error_t -defaultIslandFree( - void *ptr) { - return freeBranchIsland(ptr); -} - - mach_error_t -__asan_mach_override_ptr( - void *originalFunctionAddress, - const void *overrideFunctionAddress, - void **originalFunctionReentryIsland ) -{ - return __asan_mach_override_ptr_custom(originalFunctionAddress, - overrideFunctionAddress, - originalFunctionReentryIsland, - defaultIslandMalloc, - defaultIslandFree); -} - - mach_error_t -__asan_mach_override_ptr_custom( - void *originalFunctionAddress, - const void *overrideFunctionAddress, - void **originalFunctionReentryIsland, - island_malloc *alloc, - island_free *dealloc) -{ - assert( originalFunctionAddress ); - assert( overrideFunctionAddress ); - - // this addresses overriding such functions as AudioOutputUnitStart() - // test with modified DefaultOutputUnit project -#if defined(__x86_64__) - for(;;){ - if(*(uint16_t*)originalFunctionAddress==0x25FF) // jmp qword near [rip+0x????????] - originalFunctionAddress=*(void**)((char*)originalFunctionAddress+6+*(int32_t *)((uint16_t*)originalFunctionAddress+1)); - else break; - } -#elif defined(__i386__) - for(;;){ - if(*(uint16_t*)originalFunctionAddress==0x25FF) // jmp *0x???????? 
- originalFunctionAddress=**(void***)((uint16_t*)originalFunctionAddress+1); - else break; - } -#endif -#ifdef DEBUG_DISASM - { - fprintf(stderr, "Replacing function at %p\n", originalFunctionAddress); - fprintf(stderr, "First 16 bytes of the function: "); - unsigned char *orig = (unsigned char *)originalFunctionAddress; - int i; - for (i = 0; i < 16; i++) { - fprintf(stderr, "%x ", (unsigned int) orig[i]); - } - fprintf(stderr, "\n"); - fprintf(stderr, - "To disassemble, save the following function as disas.c" - " and run:\n gcc -c disas.c && gobjdump -d disas.o\n" - "The first 16 bytes of the original function will start" - " after four nop instructions.\n"); - fprintf(stderr, "\nvoid foo() {\n asm volatile(\"nop;nop;nop;nop;\");\n"); - int j = 0; - for (j = 0; j < 2; j++) { - fprintf(stderr, " asm volatile(\".byte "); - for (i = 8 * j; i < 8 * (j+1) - 1; i++) { - fprintf(stderr, "0x%x, ", (unsigned int) orig[i]); - } - fprintf(stderr, "0x%x;\");\n", (unsigned int) orig[8 * (j+1) - 1]); - } - fprintf(stderr, "}\n\n"); - } -#endif - - long *originalFunctionPtr = (long*) originalFunctionAddress; - mach_error_t err = err_none; - -#if defined(__ppc__) || defined(__POWERPC__) - // Ensure first instruction isn't 'mfctr'. - #define kMFCTRMask 0xfc1fffff - #define kMFCTRInstruction 0x7c0903a6 - - long originalInstruction = *originalFunctionPtr; - if( !err && ((originalInstruction & kMFCTRMask) == kMFCTRInstruction) ) - err = err_cannot_override; -#elif defined(__i386__) || defined(__x86_64__) - int eatenCount = 0; - int originalInstructionCount = 0; - char originalInstructions[kOriginalInstructionsSize]; - uint8_t originalInstructionSizes[kOriginalInstructionsSize]; - uint64_t jumpRelativeInstruction = 0; // JMP - - Boolean overridePossible = eatKnownInstructions ((unsigned char *)originalFunctionPtr, - &jumpRelativeInstruction, &eatenCount, - originalInstructions, &originalInstructionCount, - originalInstructionSizes ); -#ifdef DEBUG_DISASM - if (!overridePossible) fprintf(stderr, "overridePossible = false @%d\n", __LINE__); -#endif - if (eatenCount > kOriginalInstructionsSize) { -#ifdef DEBUG_DISASM - fprintf(stderr, "Too many instructions eaten\n"); -#endif - overridePossible = false; - } - if (!overridePossible) err = err_cannot_override; - if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__); -#endif - - // Make the original function implementation writable. - if( !err ) { - err = vm_protect( mach_task_self(), - (vm_address_t) originalFunctionPtr, 8, false, - (VM_PROT_ALL | VM_PROT_COPY) ); - if( err ) - err = vm_protect( mach_task_self(), - (vm_address_t) originalFunctionPtr, 8, false, - (VM_PROT_DEFAULT | VM_PROT_COPY) ); - } - if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__); - - // Allocate and target the escape island to the overriding function. - BranchIsland *escapeIsland = NULL; - if( !err ) - err = alloc( (void**)&escapeIsland, sizeof(BranchIsland), originalFunctionAddress ); - if ( err ) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__); - -#if defined(__ppc__) || defined(__POWERPC__) - if( !err ) - err = setBranchIslandTarget( escapeIsland, overrideFunctionAddress, 0 ); - - // Build the branch absolute instruction to the escape island. - long branchAbsoluteInstruction = 0; // Set to 0 just to silence warning. 
- if( !err ) { - long escapeIslandAddress = ((long) escapeIsland) & 0x3FFFFFF; - branchAbsoluteInstruction = 0x48000002 | escapeIslandAddress; - } -#elif defined(__i386__) || defined(__x86_64__) - if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__); - - if( !err ) - err = setBranchIslandTarget_i386( escapeIsland, overrideFunctionAddress, 0 ); - - if (err) fprintf(stderr, "err = %x %s:%d\n", err, __FILE__, __LINE__); - // Build the jump relative instruction to the escape island -#endif - - -#if defined(__i386__) || defined(__x86_64__) - if (!err) { - uint32_t addressOffset = ((char*)escapeIsland - (char*)originalFunctionPtr - 5); - addressOffset = OSSwapInt32(addressOffset); - - jumpRelativeInstruction |= 0xE900000000000000LL; - jumpRelativeInstruction |= ((uint64_t)addressOffset & 0xffffffff) << 24; - jumpRelativeInstruction = OSSwapInt64(jumpRelativeInstruction); - } -#endif - - // Optionally allocate & return the reentry island. This may contain relocated - // jmp instructions and so has all the same addressing reachability requirements - // the escape island has to the original function, except the escape island is - // technically our original function. - BranchIsland *reentryIsland = NULL; - if( !err && originalFunctionReentryIsland ) { - err = alloc( (void**)&reentryIsland, sizeof(BranchIsland), escapeIsland); - if( !err ) - *originalFunctionReentryIsland = reentryIsland; - } - -#if defined(__ppc__) || defined(__POWERPC__) - // Atomically: - // o If the reentry island was allocated: - // o Insert the original instruction into the reentry island. - // o Target the reentry island at the 2nd instruction of the - // original function. - // o Replace the original instruction with the branch absolute. - if( !err ) { - int escapeIslandEngaged = false; - do { - if( reentryIsland ) - err = setBranchIslandTarget( reentryIsland, - (void*) (originalFunctionPtr+1), originalInstruction ); - if( !err ) { - escapeIslandEngaged = CompareAndSwap( originalInstruction, - branchAbsoluteInstruction, - (UInt32*)originalFunctionPtr ); - if( !escapeIslandEngaged ) { - // Someone replaced the instruction out from under us, - // re-read the instruction, make sure it's still not - // 'mfctr' and try again. - originalInstruction = *originalFunctionPtr; - if( (originalInstruction & kMFCTRMask) == kMFCTRInstruction) - err = err_cannot_override; - } - } - } while( !err && !escapeIslandEngaged ); - } -#elif defined(__i386__) || defined(__x86_64__) - // Atomically: - // o If the reentry island was allocated: - // o Insert the original instructions into the reentry island. - // o Target the reentry island at the first non-replaced - // instruction of the original function. - // o Replace the original first instructions with the jump relative. - // - // Note that on i386, we do not support someone else changing the code under our feet - if ( !err ) { - fixupInstructions(originalFunctionPtr, reentryIsland, originalInstructions, - originalInstructionCount, originalInstructionSizes ); - - if( reentryIsland ) - err = setBranchIslandTarget_i386( reentryIsland, - (void*) ((char *)originalFunctionPtr+eatenCount), originalInstructions ); - // try making islands executable before planting the jmp -#if defined(__x86_64__) || defined(__i386__) - if( !err ) - err = makeIslandExecutable(escapeIsland); - if( !err && reentryIsland ) - err = makeIslandExecutable(reentryIsland); -#endif - if ( !err ) - atomic_mov64((uint64_t *)originalFunctionPtr, jumpRelativeInstruction); - } -#endif - - // Clean up on error. 
- if( err ) { - if( reentryIsland ) - dealloc( reentryIsland ); - if( escapeIsland ) - dealloc( escapeIsland ); - } - -#ifdef DEBUG_DISASM - { - fprintf(stderr, "First 16 bytes of the function after slicing: "); - unsigned char *orig = (unsigned char *)originalFunctionAddress; - int i; - for (i = 0; i < 16; i++) { - fprintf(stderr, "%x ", (unsigned int) orig[i]); - } - fprintf(stderr, "\n"); - } -#endif - return err; -} - -/******************************************************************************* -* -* Implementation -* -*******************************************************************************/ -#pragma mark - -#pragma mark (Implementation) - -/***************************************************************************//** - Implementation: Allocates memory for a branch island. - - @param island <- The allocated island. - @param allocateHigh -> Whether to allocate the island at the end of the - address space (for use with the branch absolute - instruction). - @result <- mach_error_t - - ***************************************************************************/ - - static mach_error_t -allocateBranchIsland( - BranchIsland **island, - int allocateHigh, - void *originalFunctionAddress) -{ - assert( island ); - - mach_error_t err = err_none; - - if( allocateHigh ) { - vm_size_t pageSize; - err = host_page_size( mach_host_self(), &pageSize ); - if( !err ) { - assert( sizeof( BranchIsland ) <= pageSize ); -#if defined(__ppc__) || defined(__POWERPC__) - vm_address_t first = 0xfeffffff; - vm_address_t last = 0xfe000000 + pageSize; -#elif defined(__x86_64__) - vm_address_t first = ((uint64_t)originalFunctionAddress & ~(uint64_t)(((uint64_t)1 << 31) - 1)) | ((uint64_t)1 << 31); // start in the middle of the page? - vm_address_t last = 0x0; -#else - vm_address_t first = 0xffc00000; - vm_address_t last = 0xfffe0000; -#endif - - vm_address_t page = first; - int allocated = 0; - vm_map_t task_self = mach_task_self(); - - while( !err && !allocated && page != last ) { - - err = vm_allocate( task_self, &page, pageSize, 0 ); - if( err == err_none ) - allocated = 1; - else if( err == KERN_NO_SPACE ) { -#if defined(__x86_64__) - page -= pageSize; -#else - page += pageSize; -#endif - err = err_none; - } - } - if( allocated ) - *island = (BranchIsland*) page; - else if( !allocated && !err ) - err = KERN_NO_SPACE; - } - } else { - void *block = malloc( sizeof( BranchIsland ) ); - if( block ) - *island = block; - else - err = KERN_NO_SPACE; - } - if( !err ) - (**island).allocatedHigh = allocateHigh; - - return err; -} - -/***************************************************************************//** - Implementation: Deallocates memory for a branch island. - - @param island -> The island to deallocate. 
- @result <- mach_error_t - - ***************************************************************************/ - - static mach_error_t -freeBranchIsland( - BranchIsland *island ) -{ - assert( island ); - assert( (*(long*)&island->instructions[0]) == kIslandTemplate[0] ); - assert( island->allocatedHigh ); - - mach_error_t err = err_none; - - if( island->allocatedHigh ) { - vm_size_t pageSize; - err = host_page_size( mach_host_self(), &pageSize ); - if( !err ) { - assert( sizeof( BranchIsland ) <= pageSize ); - err = vm_deallocate( - mach_task_self(), - (vm_address_t) island, pageSize ); - } - } else { - free( island ); - } - - return err; -} - -/***************************************************************************//** - Implementation: Sets the branch island's target, with an optional - instruction. - - @param island -> The branch island to insert target into. - @param branchTo -> The address of the target. - @param instruction -> Optional instruction to execute prior to branch. Set - to zero for nop. - @result <- mach_error_t - - ***************************************************************************/ -#if defined(__ppc__) || defined(__POWERPC__) - static mach_error_t -setBranchIslandTarget( - BranchIsland *island, - const void *branchTo, - long instruction ) -{ - // Copy over the template code. - bcopy( kIslandTemplate, island->instructions, sizeof( kIslandTemplate ) ); - - // Fill in the address. - ((short*)island->instructions)[kAddressLo] = ((long) branchTo) & 0x0000FFFF; - ((short*)island->instructions)[kAddressHi] - = (((long) branchTo) >> 16) & 0x0000FFFF; - - // Fill in the (optional) instuction. - if( instruction != 0 ) { - ((short*)island->instructions)[kInstructionLo] - = instruction & 0x0000FFFF; - ((short*)island->instructions)[kInstructionHi] - = (instruction >> 16) & 0x0000FFFF; - } - - //MakeDataExecutable( island->instructions, sizeof( kIslandTemplate ) ); - msync( island->instructions, sizeof( kIslandTemplate ), MS_INVALIDATE ); - - return err_none; -} -#endif - -#if defined(__i386__) - static mach_error_t -setBranchIslandTarget_i386( - BranchIsland *island, - const void *branchTo, - char* instructions ) -{ - - // Copy over the template code. - bcopy( kIslandTemplate, island->instructions, sizeof( kIslandTemplate ) ); - - // copy original instructions - if (instructions) { - bcopy (instructions, island->instructions + kInstructions, kOriginalInstructionsSize); - } - - // Fill in the address. - int32_t addressOffset = (char *)branchTo - (island->instructions + kJumpAddress + 4); - *((int32_t *)(island->instructions + kJumpAddress)) = addressOffset; - - msync( island->instructions, sizeof( kIslandTemplate ), MS_INVALIDATE ); - return err_none; -} - -#elif defined(__x86_64__) -static mach_error_t -setBranchIslandTarget_i386( - BranchIsland *island, - const void *branchTo, - char* instructions ) -{ - // Copy over the template code. - bcopy( kIslandTemplate, island->instructions, sizeof( kIslandTemplate ) ); - - // Copy original instructions. - if (instructions) { - bcopy (instructions, island->instructions, kOriginalInstructionsSize); - } - - // Fill in the address. 
- *((uint64_t *)(island->instructions + kJumpAddress)) = (uint64_t)branchTo; - msync( island->instructions, sizeof( kIslandTemplate ), MS_INVALIDATE ); - - return err_none; -} -#endif - - -#if defined(__i386__) || defined(__x86_64__) -// simplistic instruction matching -typedef struct { - unsigned int length; // max 15 - unsigned char mask[15]; // sequence of bytes in memory order - unsigned char constraint[15]; // sequence of bytes in memory order -} AsmInstructionMatch; - -#if defined(__i386__) -static AsmInstructionMatch possibleInstructions[] = { - { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xE9, 0x00, 0x00, 0x00, 0x00} }, // jmp 0x???????? - { 0x5, {0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, {0x55, 0x89, 0xe5, 0xc9, 0xc3} }, // push %esp; mov %esp,%ebp; leave; ret - { 0x1, {0xFF}, {0x90} }, // nop - { 0x1, {0xF8}, {0x50} }, // push %reg - { 0x2, {0xFF, 0xFF}, {0x89, 0xE5} }, // mov %esp,%ebp - { 0x3, {0xFF, 0xFF, 0xFF}, {0x89, 0x1C, 0x24} }, // mov %ebx,(%esp) - { 0x3, {0xFF, 0xFF, 0x00}, {0x83, 0xEC, 0x00} }, // sub 0x??, %esp - { 0x6, {0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00}, {0x81, 0xEC, 0x00, 0x00, 0x00, 0x00} }, // sub 0x??, %esp with 32bit immediate - { 0x2, {0xFF, 0xFF}, {0x31, 0xC0} }, // xor %eax, %eax - { 0x3, {0xFF, 0x4F, 0x00}, {0x8B, 0x45, 0x00} }, // mov $imm(%ebp), %reg - { 0x3, {0xFF, 0x4C, 0x00}, {0x8B, 0x40, 0x00} }, // mov $imm(%eax-%edx), %reg - { 0x3, {0xFF, 0xCF, 0x00}, {0x8B, 0x4D, 0x00} }, // mov $imm(%rpb), %reg - { 0x3, {0xFF, 0x4F, 0x00}, {0x8A, 0x4D, 0x00} }, // mov $imm(%ebp), %cl - { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x8B, 0x4C, 0x24, 0x00} }, // mov $imm(%esp), %ecx - { 0x4, {0xFF, 0x00, 0x00, 0x00}, {0x8B, 0x00, 0x00, 0x00} }, // mov r16,r/m16 or r32,r/m32 - { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xB9, 0x00, 0x00, 0x00, 0x00} }, // mov $imm, %ecx - { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xB8, 0x00, 0x00, 0x00, 0x00} }, // mov $imm, %eax - { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x66, 0x0F, 0xEF, 0x00} }, // pxor xmm2/128, xmm1 - { 0x2, {0xFF, 0xFF}, {0xDB, 0xE3} }, // fninit - { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xE8, 0x00, 0x00, 0x00, 0x00} }, // call $imm - { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x0F, 0xBE, 0x55, 0x00} }, // movsbl $imm(%ebp), %edx - { 0x0, {0x00}, {0x00} } -}; -#elif defined(__x86_64__) -// TODO(glider): disassembling the "0x48, 0x89" sequences is trickier than it's done below. -// If it stops working, refer to http://ref.x86asm.net/geek.html#modrm_byte_32_64 to do it -// more accurately. -// Note: 0x48 is in fact the REX.W prefix, but it might be wrong to treat it as a separate -// instruction. -static AsmInstructionMatch possibleInstructions[] = { - { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0xE9, 0x00, 0x00, 0x00, 0x00} }, // jmp 0x???????? 
- { 0x1, {0xFF}, {0x90} }, // nop - { 0x1, {0xF8}, {0x50} }, // push %rX - { 0x1, {0xFF}, {0x65} }, // GS prefix - { 0x3, {0xFF, 0xFF, 0xFF}, {0x48, 0x89, 0xE5} }, // mov %rsp,%rbp - { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x48, 0x83, 0xEC, 0x00} }, // sub 0x??, %rsp - { 0x4, {0xFB, 0xFF, 0x07, 0x00}, {0x48, 0x89, 0x05, 0x00} }, // move onto rbp - { 0x3, {0xFB, 0xFF, 0x00}, {0x48, 0x89, 0x00} }, // mov %reg, %reg - { 0x3, {0xFB, 0xFF, 0x00}, {0x49, 0x89, 0x00} }, // mov %reg, %reg (REX.WB) - { 0x2, {0xFF, 0x00}, {0x41, 0x00} }, // push %rXX - { 0x2, {0xFF, 0x00}, {0x84, 0x00} }, // test %rX8,%rX8 - { 0x2, {0xFF, 0x00}, {0x85, 0x00} }, // test %rX,%rX - { 0x2, {0xFF, 0x00}, {0x77, 0x00} }, // ja $i8 - { 0x2, {0xFF, 0x00}, {0x74, 0x00} }, // je $i8 - { 0x5, {0xF8, 0x00, 0x00, 0x00, 0x00}, {0xB8, 0x00, 0x00, 0x00, 0x00} }, // mov $imm, %reg - { 0x3, {0xFF, 0xFF, 0x00}, {0xFF, 0x77, 0x00} }, // pushq $imm(%rdi) - { 0x2, {0xFF, 0xFF}, {0x31, 0xC0} }, // xor %eax, %eax - { 0x5, {0xFF, 0x00, 0x00, 0x00, 0x00}, {0x25, 0x00, 0x00, 0x00, 0x00} }, // and $imm, %eax - { 0x3, {0xFF, 0xFF, 0xFF}, {0x80, 0x3F, 0x00} }, // cmpb $imm, (%rdi) - - { 0x8, {0xFF, 0xFF, 0xCF, 0xFF, 0x00, 0x00, 0x00, 0x00}, - {0x48, 0x8B, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00}, }, // mov $imm, %{rax,rdx,rsp,rsi} - { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x48, 0x83, 0xFA, 0x00}, }, // cmp $i8, %rdx - { 0x4, {0xFF, 0xFF, 0x00, 0x00}, {0x83, 0x7f, 0x00, 0x00}, }, // cmpl $imm, $imm(%rdi) - { 0xa, {0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - {0x48, 0xB8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, // mov $imm, %rax - { 0x6, {0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00}, - {0x81, 0xE6, 0x00, 0x00, 0x00, 0x00} }, // and $imm, %esi - { 0x6, {0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00}, - {0xFF, 0x25, 0x00, 0x00, 0x00, 0x00} }, // jmpq *(%rip) - { 0x4, {0xFF, 0xFF, 0xFF, 0x00}, {0x66, 0x0F, 0xEF, 0x00} }, // pxor xmm2/128, xmm1 - { 0x2, {0xFF, 0x00}, {0x89, 0x00} }, // mov r/m32,r32 or r/m16,r16 - { 0x3, {0xFF, 0xFF, 0xFF}, {0x49, 0x89, 0xF8} }, // mov %rdi,%r8 - { 0x4, {0xFF, 0xFF, 0xFF, 0xFF}, {0x40, 0x0F, 0xBE, 0xCE} }, // movsbl %sil,%ecx - { 0x7, {0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00}, - {0x48, 0x8D, 0x05, 0x00, 0x00, 0x00, 0x00} }, // lea $imm(%rip),%rax - { 0x3, {0xFF, 0xFF, 0xFF}, {0x0F, 0xBE, 0xCE} }, // movsbl, %dh, %ecx - { 0x3, {0xFF, 0xFF, 0x00}, {0xFF, 0x77, 0x00} }, // pushq $imm(%rdi) - { 0x2, {0xFF, 0xFF}, {0xDB, 0xE3} }, // fninit - { 0x3, {0xFF, 0xFF, 0xFF}, {0x48, 0x85, 0xD2} }, // test %rdx,%rdx - { 0x0, {0x00}, {0x00} } -}; -#endif - -static Boolean codeMatchesInstruction(unsigned char *code, AsmInstructionMatch* instruction) -{ - Boolean match = true; - - size_t i; - assert(instruction); -#ifdef DEBUG_DISASM - fprintf(stderr, "Matching: "); -#endif - for (i=0; i<instruction->length; i++) { - unsigned char mask = instruction->mask[i]; - unsigned char constraint = instruction->constraint[i]; - unsigned char codeValue = code[i]; -#ifdef DEBUG_DISASM - fprintf(stderr, "%x ", (unsigned)codeValue); -#endif - match = ((codeValue & mask) == constraint); - if (!match) break; - } -#ifdef DEBUG_DISASM - if (match) { - fprintf(stderr, " OK\n"); - } else { - fprintf(stderr, " FAIL\n"); - } -#endif - return match; -} - -#if defined(__i386__) || defined(__x86_64__) - static Boolean -eatKnownInstructions( - unsigned char *code, - uint64_t *newInstruction, - int *howManyEaten, - char *originalInstructions, - int *originalInstructionCount, - uint8_t *originalInstructionSizes ) -{ - Boolean allInstructionsKnown = true; - int 
totalEaten = 0; - unsigned char* ptr = code; - int remainsToEat = 5; // a JMP instruction takes 5 bytes - int instructionIndex = 0; - - if (howManyEaten) *howManyEaten = 0; - if (originalInstructionCount) *originalInstructionCount = 0; - while (remainsToEat > 0) { - Boolean curInstructionKnown = false; - - // See if instruction matches one we know - AsmInstructionMatch* curInstr = possibleInstructions; - do { - if ((curInstructionKnown = codeMatchesInstruction(ptr, curInstr))) break; - curInstr++; - } while (curInstr->length > 0); - - // if all instruction matches failed, we don't know current instruction then, stop here - if (!curInstructionKnown) { - allInstructionsKnown = false; - fprintf(stderr, "mach_override: some instructions unknown! Need to update mach_override.c\n"); - break; - } - - // At this point, we've matched curInstr - int eaten = curInstr->length; - ptr += eaten; - remainsToEat -= eaten; - totalEaten += eaten; - - if (originalInstructionSizes) originalInstructionSizes[instructionIndex] = eaten; - instructionIndex += 1; - if (originalInstructionCount) *originalInstructionCount = instructionIndex; - } - - - if (howManyEaten) *howManyEaten = totalEaten; - - if (originalInstructions) { - Boolean enoughSpaceForOriginalInstructions = (totalEaten < kOriginalInstructionsSize); - - if (enoughSpaceForOriginalInstructions) { - memset(originalInstructions, 0x90 /* NOP */, kOriginalInstructionsSize); // fill instructions with NOP - bcopy(code, originalInstructions, totalEaten); - } else { -#ifdef DEBUG_DISASM - fprintf(stderr, "Not enough space in island to store original instructions. Adapt the island definition and kOriginalInstructionsSize\n"); -#endif - return false; - } - } - - if (allInstructionsKnown) { - // save last 3 bytes of first 64bits of codre we'll replace - uint64_t currentFirst64BitsOfCode = *((uint64_t *)code); - currentFirst64BitsOfCode = OSSwapInt64(currentFirst64BitsOfCode); // back to memory representation - currentFirst64BitsOfCode &= 0x0000000000FFFFFFLL; - - // keep only last 3 instructions bytes, first 5 will be replaced by JMP instr - *newInstruction &= 0xFFFFFFFFFF000000LL; // clear last 3 bytes - *newInstruction |= (currentFirst64BitsOfCode & 0x0000000000FFFFFFLL); // set last 3 bytes - } - - return allInstructionsKnown; -} - - static void -fixupInstructions( - void *originalFunction, - void *escapeIsland, - void *instructionsToFix, - int instructionCount, - uint8_t *instructionSizes ) -{ - void *initialOriginalFunction = originalFunction; - int index, fixed_size, code_size = 0; - for (index = 0;index < instructionCount;index += 1) - code_size += instructionSizes[index]; - -#ifdef DEBUG_DISASM - void *initialInstructionsToFix = instructionsToFix; - fprintf(stderr, "BEFORE FIXING:\n"); - dump16Bytes(initialOriginalFunction); - dump16Bytes(initialInstructionsToFix); -#endif // DEBUG_DISASM - - for (index = 0;index < instructionCount;index += 1) - { - fixed_size = instructionSizes[index]; - if ((*(uint8_t*)instructionsToFix == 0xE9) || // 32-bit jump relative - (*(uint8_t*)instructionsToFix == 0xE8)) // 32-bit call relative - { - uint32_t offset = (uintptr_t)originalFunction - (uintptr_t)escapeIsland; - uint32_t *jumpOffsetPtr = (uint32_t*)((uintptr_t)instructionsToFix + 1); - *jumpOffsetPtr += offset; - } - if ((*(uint8_t*)instructionsToFix == 0x74) || // Near jump if equal (je), 2 bytes. - (*(uint8_t*)instructionsToFix == 0x77)) // Near jump if above (ja), 2 bytes. 
- { - // We replace a near je/ja instruction, "7P JJ", with a 32-bit je/ja, "0F 8P WW XX YY ZZ". - // This is critical, otherwise a near jump will likely fall outside the original function. - uint32_t offset = (uintptr_t)initialOriginalFunction - (uintptr_t)escapeIsland; - uint32_t jumpOffset = *(uint8_t*)((uintptr_t)instructionsToFix + 1); - *((uint8_t*)instructionsToFix + 1) = *(uint8_t*)instructionsToFix + 0x10; - *(uint8_t*)instructionsToFix = 0x0F; - uint32_t *jumpOffsetPtr = (uint32_t*)((uintptr_t)instructionsToFix + 2 ); - *jumpOffsetPtr = offset + jumpOffset; - fixed_size = 6; - } - - originalFunction = (void*)((uintptr_t)originalFunction + instructionSizes[index]); - escapeIsland = (void*)((uintptr_t)escapeIsland + instructionSizes[index]); - instructionsToFix = (void*)((uintptr_t)instructionsToFix + fixed_size); - - // Expanding short instructions into longer ones may overwrite the next instructions, - // so we must restore them. - code_size -= fixed_size; - if ((code_size > 0) && (fixed_size != instructionSizes[index])) { - bcopy(originalFunction, instructionsToFix, code_size); - } - } -#ifdef DEBUG_DISASM - fprintf(stderr, "AFTER_FIXING:\n"); - dump16Bytes(initialOriginalFunction); - dump16Bytes(initialInstructionsToFix); -#endif // DEBUG_DISASM -} - -#ifdef DEBUG_DISASM -#define HEX_DIGIT(x) ((((x) % 16) < 10) ? ('0' + ((x) % 16)) : ('A' + ((x) % 16 - 10))) - - static void -dump16Bytes( - void *ptr) { - int i; - char buf[3]; - uint8_t *bytes = (uint8_t*)ptr; - for (i = 0; i < 16; i++) { - buf[0] = HEX_DIGIT(bytes[i] / 16); - buf[1] = HEX_DIGIT(bytes[i] % 16); - buf[2] = ' '; - write(2, buf, 3); - } - write(2, "\n", 1); -} -#endif // DEBUG_DISASM -#endif - -#if defined(__i386__) -__asm( - ".text;" - ".align 2, 0x90;" - "_atomic_mov64:;" - " pushl %ebp;" - " movl %esp, %ebp;" - " pushl %esi;" - " pushl %ebx;" - " pushl %ecx;" - " pushl %eax;" - " pushl %edx;" - - // atomic push of value to an address - // we use cmpxchg8b, which compares content of an address with - // edx:eax. If they are equal, it atomically puts 64bit value - // ecx:ebx in address. - // We thus put contents of address in edx:eax to force ecx:ebx - // in address - " mov 8(%ebp), %esi;" // esi contains target address - " mov 12(%ebp), %ebx;" - " mov 16(%ebp), %ecx;" // ecx:ebx now contains value to put in target address - " mov (%esi), %eax;" - " mov 4(%esi), %edx;" // edx:eax now contains value currently contained in target address - " lock; cmpxchg8b (%esi);" // atomic move. 
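An aside on the 64-bit store: the x86_64 variant further down simply assigns *targetAddress = value, relying on aligned 8-byte stores being atomic on that architecture, while the i386 path above has to go through lock cmpxchg8b. The same guarantee expressed portably with C++11 atomics (illustrative names, not part of this file, and the original operates on a raw uint64_t pointer):

#include <atomic>
#include <cstdint>

// Store a 64-bit value so that readers never observe a torn half-write.
static void AtomicMov64(std::atomic<uint64_t> *target, uint64_t value) {
  target->store(value, std::memory_order_relaxed);
}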
- - // restore registers - " popl %edx;" - " popl %eax;" - " popl %ecx;" - " popl %ebx;" - " popl %esi;" - " popl %ebp;" - " ret" -); -#elif defined(__x86_64__) -void atomic_mov64( - uint64_t *targetAddress, - uint64_t value ) -{ - *targetAddress = value; -} -#endif -#endif -#endif // __APPLE__ diff --git a/lib/interception/mach_override/mach_override.h b/lib/interception/mach_override/mach_override.h deleted file mode 100644 index 7e60cdcd619c..000000000000 --- a/lib/interception/mach_override/mach_override.h +++ /dev/null @@ -1,140 +0,0 @@ -/******************************************************************************* - mach_override.h - Copyright (c) 2003-2009 Jonathan 'Wolf' Rentzsch: <http://rentzsch.com> - Some rights reserved: <http://opensource.org/licenses/mit-license.php> - - ***************************************************************************/ - -/***************************************************************************//** - @mainpage mach_override - @author Jonathan 'Wolf' Rentzsch: <http://rentzsch.com> - - This package, coded in C to the Mach API, allows you to override ("patch") - program- and system-supplied functions at runtime. You can fully replace - functions with your implementations, or merely head- or tail-patch the - original implementations. - - Use it by #include'ing mach_override.h from your .c, .m or .mm file(s). - - @todo Discontinue use of Carbon's MakeDataExecutable() and - CompareAndSwap() calls and start using the Mach equivalents, if they - exist. If they don't, write them and roll them in. That way, this - code will be pure Mach, which will make it easier to use everywhere. - Update: MakeDataExecutable() has been replaced by - msync(MS_INVALIDATE). There is an OSCompareAndSwap in libkern, but - I'm currently unsure if I can link against it. May have to roll in - my own version... - @todo Stop using an entire 4K high-allocated VM page per 28-byte escape - branch island. Done right, this will dramatically speed up escape - island allocations when they number over 250. Then again, if you're - overriding more than 250 functions, maybe speed isn't your main - concern... - @todo Add detection of: b, bl, bla, bc, bcl, bcla, bcctrl, bclrl - first-instructions. Initially, we should refuse to override - functions beginning with these instructions. Eventually, we should - dynamically rewrite them to make them position-independent. - @todo Write mach_unoverride(), which would remove an override placed on a - function. Must be multiple-override aware, which means an almost - complete rewrite under the covers, because the target address can't - be spread across two load instructions like it is now since it will - need to be atomically updatable. - @todo Add non-rentry variants of overrides to test_mach_override. - - ***************************************************************************/ - -#ifdef __APPLE__ - -#ifndef _mach_override_ -#define _mach_override_ - -#include <sys/types.h> -#include <mach/error.h> - -#ifdef __cplusplus - extern "C" { -#endif - -/** - Returned if the function to be overrided begins with a 'mfctr' instruction. -*/ -#define err_cannot_override (err_local|1) - -/************************************************************************************//** - Dynamically overrides the function implementation referenced by - originalFunctionAddress with the implentation pointed to by overrideFunctionAddress. - Optionally returns a pointer to a "reentry island" which, if jumped to, will resume - the original implementation. 
- - @param originalFunctionAddress -> Required address of the function to - override (with overrideFunctionAddress). - @param overrideFunctionAddress -> Required address to the overriding - function. - @param originalFunctionReentryIsland <- Optional pointer to pointer to the - reentry island. Can be NULL. - @result <- err_cannot_override if the original - function's implementation begins with - the 'mfctr' instruction. - - ************************************************************************************/ - -// We're prefixing mach_override_ptr() with "__asan_" to avoid name conflicts with other -// mach_override_ptr() implementations that may appear in the client program. - mach_error_t -__asan_mach_override_ptr( - void *originalFunctionAddress, - const void *overrideFunctionAddress, - void **originalFunctionReentryIsland ); - -// Allow to use custom allocation and deallocation routines with mach_override_ptr(). -// This should help to speed up the things on x86_64. -typedef mach_error_t island_malloc( void **ptr, size_t size, void *hint ); -typedef mach_error_t island_free( void *ptr ); - - mach_error_t -__asan_mach_override_ptr_custom( - void *originalFunctionAddress, - const void *overrideFunctionAddress, - void **originalFunctionReentryIsland, - island_malloc *alloc, - island_free *dealloc ); - -/************************************************************************************//** - - - ************************************************************************************/ - -#ifdef __cplusplus - -#define MACH_OVERRIDE( ORIGINAL_FUNCTION_RETURN_TYPE, ORIGINAL_FUNCTION_NAME, ORIGINAL_FUNCTION_ARGS, ERR ) \ - { \ - static ORIGINAL_FUNCTION_RETURN_TYPE (*ORIGINAL_FUNCTION_NAME##_reenter)ORIGINAL_FUNCTION_ARGS; \ - static bool ORIGINAL_FUNCTION_NAME##_overriden = false; \ - class mach_override_class__##ORIGINAL_FUNCTION_NAME { \ - public: \ - static kern_return_t override(void *originalFunctionPtr) { \ - kern_return_t result = err_none; \ - if (!ORIGINAL_FUNCTION_NAME##_overriden) { \ - ORIGINAL_FUNCTION_NAME##_overriden = true; \ - result = mach_override_ptr( (void*)originalFunctionPtr, \ - (void*)mach_override_class__##ORIGINAL_FUNCTION_NAME::replacement, \ - (void**)&ORIGINAL_FUNCTION_NAME##_reenter ); \ - } \ - return result; \ - } \ - static ORIGINAL_FUNCTION_RETURN_TYPE replacement ORIGINAL_FUNCTION_ARGS { - -#define END_MACH_OVERRIDE( ORIGINAL_FUNCTION_NAME ) \ - } \ - }; \ - \ - err = mach_override_class__##ORIGINAL_FUNCTION_NAME::override((void*)ORIGINAL_FUNCTION_NAME); \ - } - -#endif - -#ifdef __cplusplus - } -#endif -#endif // _mach_override_ - -#endif // __APPLE__ diff --git a/lib/lit.common.cfg b/lib/lit.common.cfg index 428554ac77a2..b410259a9e71 100644 --- a/lib/lit.common.cfg +++ b/lib/lit.common.cfg @@ -52,3 +52,9 @@ config.substitutions.append( ("%clangxx ", (" " + config.clang + config.substitutions.append( (' clang', """\n\n*** Do not use 'clangXXX' in tests, instead define '%clangXXX' substitution in lit config. ***\n\n""") ) + +# Add supported compiler_rt architectures to a list of available features. 
+compiler_rt_arch = getattr(config, 'compiler_rt_arch', None) +if compiler_rt_arch: + for arch in compiler_rt_arch.split(";"): + config.available_features.add(arch + "-supported-target") diff --git a/lib/lit.common.unit.cfg b/lib/lit.common.unit.cfg index 8250b4a829c6..ca00abb65e9f 100644 --- a/lib/lit.common.unit.cfg +++ b/lib/lit.common.unit.cfg @@ -7,8 +7,8 @@ import os # Setup test format -build_type = getattr(config, "build_type", "Debug") -config.test_format = lit.formats.GoogleTest(build_type, "Test") +llvm_build_mode = getattr(config, "llvm_build_mode", "Debug") +config.test_format = lit.formats.GoogleTest(llvm_build_mode, "Test") # Setup test suffixes. config.suffixes = [] diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt new file mode 100644 index 000000000000..378f08199218 --- /dev/null +++ b/lib/lsan/CMakeLists.txt @@ -0,0 +1,51 @@ +include_directories(..) + +set(LSAN_CFLAGS + ${SANITIZER_COMMON_CFLAGS}) + +set(LSAN_COMMON_SOURCES + lsan_common.cc + lsan_common_linux.cc) + +set(LSAN_SOURCES + lsan_interceptors.cc + lsan_allocator.cc + lsan_thread.cc + lsan.cc) + +set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}) + +# The common files need to build on every arch supported by ASan. +# (Even if they build into dummy object files.) +filter_available_targets(LSAN_COMMON_SUPPORTED_ARCH + x86_64 i386 powerpc64 powerpc) + +# Architectures supported by the standalone LSan. +filter_available_targets(LSAN_SUPPORTED_ARCH + x86_64) + +set(LSAN_RUNTIME_LIBRARIES) + +if (NOT APPLE AND NOT ANDROID) + foreach(arch ${LSAN_COMMON_SUPPORTED_ARCH}) + add_compiler_rt_object_library(RTLSanCommon ${arch} + SOURCES ${LSAN_COMMON_SOURCES} + CFLAGS ${LSAN_CFLAGS}) + endforeach() + + foreach(arch ${LSAN_SUPPORTED_ARCH}) + add_compiler_rt_static_runtime(clang_rt.lsan-${arch} ${arch} + SOURCES ${LSAN_SOURCES} + $<TARGET_OBJECTS:RTInterception.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTLSanCommon.${arch}> + CFLAGS ${LSAN_CFLAGS}) + list(APPEND LSAN_RUNTIME_LIBRARIES clang_rt.lsan-${arch}) + endforeach() +endif() + +if (LLVM_INCLUDE_TESTS) + add_subdirectory(tests) +endif() +add_subdirectory(lit_tests) diff --git a/lib/interception/mach_override/Makefile.mk b/lib/lsan/Makefile.mk index 8f5ebdab1bf2..aae5c32fd32f 100644 --- a/lib/interception/mach_override/Makefile.mk +++ b/lib/lsan/Makefile.mk @@ -1,4 +1,4 @@ -#===- lib/interception/mach_override/Makefile.mk -----------*- Makefile -*--===# +#===- lib/lsan/Makefile.mk ---------------------------------*- Makefile -*--===# # # The LLVM Compiler Infrastructure # @@ -7,16 +7,17 @@ # #===------------------------------------------------------------------------===# -ModuleName := interception -SubDirs := +ModuleName := lsan_common +SubDirs := -Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file))) -ObjNames := $(Sources:%.c=%.o) +Sources := $(foreach file,$(wildcard $(Dir)/lsan_common*.cc),$(notdir $(file))) +ObjNames := $(Sources:%.cc=%.o) Implementation := Generic # FIXME: use automatic dependencies? Dependencies := $(wildcard $(Dir)/*.h) +Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h) -# Define a convenience variable for all the interception functions. -InterceptionFunctions += $(Sources:%.c=%) +# Define a convenience variable for all the asan functions. 
+LsanCommonFunctions := $(Sources:%.cc=%) diff --git a/lib/lsan/lit_tests/CMakeLists.txt b/lib/lsan/lit_tests/CMakeLists.txt new file mode 100644 index 000000000000..e1be508202b8 --- /dev/null +++ b/lib/lsan/lit_tests/CMakeLists.txt @@ -0,0 +1,28 @@ +set(LSAN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/..) +set(LSAN_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/..) + +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg + ) + +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg + ) + +if(COMPILER_RT_CAN_EXECUTE_TESTS) + set(LSAN_TEST_DEPS + ${SANITIZER_COMMON_LIT_TEST_DEPS} + ${LSAN_RUNTIME_LIBRARIES}) + set(LSAN_TEST_PARAMS + lsan_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg) + if(LLVM_INCLUDE_TESTS) + list(APPEND LSAN_TEST_DEPS LsanTests) + endif() + add_lit_testsuite(check-lsan "Running the LeakSanitizer tests" + ${CMAKE_CURRENT_BINARY_DIR} + PARAMS ${LSAN_TEST_PARAMS} + DEPENDS ${LSAN_TEST_DEPS}) + set_target_properties(check-lsan PROPERTIES FOLDER "LSan tests") +endif() diff --git a/lib/lsan/lit_tests/SharedLibs/lit.local.cfg b/lib/lsan/lit_tests/SharedLibs/lit.local.cfg new file mode 100644 index 000000000000..b3677c17a0f2 --- /dev/null +++ b/lib/lsan/lit_tests/SharedLibs/lit.local.cfg @@ -0,0 +1,4 @@ +# Sources in this directory are compiled as shared libraries and used by +# tests in parent directory. + +config.suffixes = [] diff --git a/lib/msan/tests/lit.cfg b/lib/lsan/lit_tests/Unit/lit.cfg index 38aa1380443f..bcd1de4477f1 100644 --- a/lib/msan/tests/lit.cfg +++ b/lib/lsan/lit_tests/Unit/lit.cfg @@ -11,19 +11,16 @@ def get_required_attr(config, attr_name): return attr_value # Setup attributes common for all compiler-rt projects. -llvm_src_root = get_required_attr(config, 'llvm_src_root') -compiler_rt_lit_unit_cfg = os.path.join(llvm_src_root, "projects", - "compiler-rt", "lib", +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib", "lit.common.unit.cfg") lit.load_config(config, compiler_rt_lit_unit_cfg) # Setup config name. -config.name = 'MemorySanitizer' +config.name = 'LeakSanitizer-Unit' # Setup test source and exec root. For unit tests, we define -# it as build directory with sanitizer_common unit tests. -llvm_obj_root = get_required_attr(config, "llvm_obj_root") -config.test_exec_root = os.path.join(llvm_obj_root, "projects", - "compiler-rt", "lib", - "msan", "tests") +# it as build directory with LSan unit tests. +lsan_binary_dir = get_required_attr(config, "lsan_binary_dir") +config.test_exec_root = os.path.join(lsan_binary_dir, "tests") config.test_source_root = config.test_exec_root diff --git a/lib/lsan/lit_tests/Unit/lit.site.cfg.in b/lib/lsan/lit_tests/Unit/lit.site.cfg.in new file mode 100644 index 000000000000..90c88c952156 --- /dev/null +++ b/lib/lsan/lit_tests/Unit/lit.site.cfg.in @@ -0,0 +1,17 @@ +## Autogenerated by LLVM/Clang configuration. +# Do not edit! + +config.target_triple = "@TARGET_TRIPLE@" +config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" +config.llvm_build_mode = "@LLVM_BUILD_MODE@" +config.lsan_binary_dir = "@LSAN_BINARY_DIR@" + +try: + config.llvm_build_mode = config.llvm_build_mode % lit.params +except KeyError,e: + key, = e.args + lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) + +# Let the main config do the real work. 
+lit.load_config(config, "@LSAN_SOURCE_DIR@/lit_tests/Unit/lit.cfg") diff --git a/lib/lsan/lit_tests/lit.cfg b/lib/lsan/lit_tests/lit.cfg new file mode 100644 index 000000000000..48e1453334d0 --- /dev/null +++ b/lib/lsan/lit_tests/lit.cfg @@ -0,0 +1,50 @@ +# -*- Python -*- + +import os + +def get_required_attr(config, attr_name): + attr_value = getattr(config, attr_name, None) + if not attr_value: + lit.fatal("No attribute %r in test configuration! You may need to run " + "tests from your build directory or add this attribute " + "to lit.site.cfg " % attr_name) + return attr_value + +# Setup attributes common for all compiler-rt projects. +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib", + "lit.common.unit.cfg") +lit.load_config(config, compiler_rt_lit_unit_cfg) + +# Setup config name. +config.name = 'LeakSanitizer' + +# Setup source root. +config.test_source_root = os.path.dirname(__file__) + +# Setup attributes common for all compiler-rt projects. +compiler_rt_lit_cfg = os.path.join(compiler_rt_src_root, "lib", + "lit.common.cfg") +if (not compiler_rt_lit_cfg) or (not os.path.exists(compiler_rt_lit_cfg)): + lit.fatal("Can't find common compiler-rt lit config at: %r" + % compiler_rt_lit_cfg) +lit.load_config(config, compiler_rt_lit_cfg) + +clang_cxxflags = ("-ccc-cxx " + + "-g " + + "-O0 " + + "-m64 ") + +clang_lsan_cxxflags = clang_cxxflags + "-fsanitize=leak " + +config.substitutions.append( ("%clangxx ", (" " + config.clang + " " + + clang_cxxflags + " ")) ) +config.substitutions.append( ("%clangxx_lsan ", (" " + config.clang + " " + + clang_lsan_cxxflags + " ")) ) + +# Default test suffixes. +config.suffixes = ['.c', '.cc', '.cpp'] + +# LeakSanitizer tests are currently supported on x86-64 Linux only. +if config.host_os not in ['Linux'] or config.host_arch not in ['x86_64']: + config.unsupported = True diff --git a/lib/lsan/lit_tests/lit.site.cfg.in b/lib/lsan/lit_tests/lit.site.cfg.in new file mode 100644 index 000000000000..3de98a9811f8 --- /dev/null +++ b/lib/lsan/lit_tests/lit.site.cfg.in @@ -0,0 +1,20 @@ +config.host_os = "@HOST_OS@" +config.host_arch = "@HOST_ARCH@" +config.llvm_build_mode = "@LLVM_BUILD_MODE@" +config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" +config.llvm_obj_root = "@LLVM_BINARY_DIR@" +config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" +config.clang = "@LLVM_BINARY_DIR@/bin/clang" +config.compiler_rt_arch = "@COMPILER_RT_SUPPORTED_ARCH@" + +# LLVM tools dir can be passed in lit parameters, so try to +# apply substitution. +try: + config.llvm_tools_dir = config.llvm_tools_dir % lit.params +except KeyError,e: + key, = e.args + lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) + +# Let the main config do the real work. +lit.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg") diff --git a/lib/lsan/lit_tests/use_globals_initialized.cc b/lib/lsan/lit_tests/use_globals_initialized.cc new file mode 100644 index 000000000000..53c22c8ac057 --- /dev/null +++ b/lib/lsan/lit_tests/use_globals_initialized.cc @@ -0,0 +1,21 @@ +// Test that initialized globals are included in the root set. 
+// RUN: LSAN_BASE="report_blocks=1:use_stacks=0:use_registers=0" +// RUN: %clangxx_lsan %s -o %t +// RUN: LSAN_OPTIONS=$LSAN_BASE:"use_globals=0" %t 2>&1 | FileCheck %s +// RUN: LSAN_OPTIONS=$LSAN_BASE:"use_globals=1" %t 2>&1 +// RUN: LSAN_OPTIONS="" %t 2>&1 + +#include <stdio.h> +#include <stdlib.h> + +void *data_var = (void *)1; + +int main() { + data_var = malloc(1337); + fprintf(stderr, "Test alloc: %p.\n", data_var); + return 0; +} +// CHECK: Test alloc: [[ADDR:.*]]. +// CHECK: LeakSanitizer: detected memory leaks +// CHECK: Directly leaked 1337 byte block at [[ADDR]] +// CHECK: SUMMARY: LeakSanitizer: diff --git a/lib/lsan/lsan.cc b/lib/lsan/lsan.cc new file mode 100644 index 000000000000..9b83b411f843 --- /dev/null +++ b/lib/lsan/lsan.cc @@ -0,0 +1,63 @@ +//=-- lsan.cc -------------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan RTL. +// +//===----------------------------------------------------------------------===// + +#include "lsan.h" + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "lsan_allocator.h" +#include "lsan_common.h" +#include "lsan_thread.h" + +namespace __lsan { + +static void InitializeCommonFlags() { + CommonFlags *cf = common_flags(); + cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH"); + cf->symbolize = (cf->external_symbolizer_path && + cf->external_symbolizer_path[0]); + cf->strip_path_prefix = ""; + cf->fast_unwind_on_malloc = true; + cf->malloc_context_size = 30; + + ParseCommonFlagsFromString(GetEnv("LSAN_OPTIONS")); +} + +void Init() { + static bool inited; + if (inited) + return; + inited = true; + SanitizerToolName = "LeakSanitizer"; + InitializeCommonFlags(); + InitializeAllocator(); + InitTlsSize(); + InitializeInterceptors(); + InitializeThreadRegistry(); + u32 tid = ThreadCreate(0, 0, true); + CHECK_EQ(tid, 0); + ThreadStart(tid, GetTid()); + + // Start symbolizer process if necessary. + const char* external_symbolizer = common_flags()->external_symbolizer_path; + if (common_flags()->symbolize && external_symbolizer && + external_symbolizer[0]) { + InitializeExternalSymbolizer(external_symbolizer); + } + + InitCommonLsan(); + Atexit(DoLeakCheck); +} + +} // namespace __lsan diff --git a/lib/lsan/lsan.h b/lib/lsan/lsan.h new file mode 100644 index 000000000000..d89a6ab8f983 --- /dev/null +++ b/lib/lsan/lsan.h @@ -0,0 +1,23 @@ +//=-- lsan.h --------------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Private header for standalone LSan RTL. 
+// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __lsan { + +void Init(); +void InitializeInterceptors(); + +} // namespace __lsan diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc new file mode 100644 index 000000000000..49b5a9fa4c5d --- /dev/null +++ b/lib/lsan/lsan_allocator.cc @@ -0,0 +1,190 @@ +//=-- lsan_allocator.cc ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// See lsan_allocator.h for details. +// +//===----------------------------------------------------------------------===// + +#include "lsan_allocator.h" + +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "lsan_common.h" + +namespace __lsan { + +static const uptr kMaxAllowedMallocSize = + FIRST_32_SECOND_64(3UL << 30, 8UL << 30); + +static const uptr kAllocatorSpace = 0x600000000000ULL; +static const uptr kAllocatorSize = 0x10000000000ULL; // 1T. + +struct ChunkMetadata { + bool allocated : 8; // Must be first. + ChunkTag tag : 2; + uptr requested_size : 54; + u32 stack_trace_id; +}; + +typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, + sizeof(ChunkMetadata), CompactSizeClassMap> PrimaryAllocator; +typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; +typedef LargeMmapAllocator<> SecondaryAllocator; +typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, + SecondaryAllocator> Allocator; + +static Allocator allocator; +static THREADLOCAL AllocatorCache cache; + +void InitializeAllocator() { + allocator.Init(); +} + +void AllocatorThreadFinish() { + allocator.SwallowCache(&cache); +} + +static ChunkMetadata *Metadata(void *p) { + return (ChunkMetadata *)allocator.GetMetaData(p); +} + +static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) { + if (!p) return; + ChunkMetadata *m = Metadata(p); + CHECK(m); + m->stack_trace_id = StackDepotPut(stack.trace, stack.size); + m->requested_size = size; + atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed); +} + +static void RegisterDeallocation(void *p) { + if (!p) return; + ChunkMetadata *m = Metadata(p); + CHECK(m); + atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed); +} + +void *Allocate(const StackTrace &stack, uptr size, uptr alignment, + bool cleared) { + if (size == 0) + size = 1; + if (size > kMaxAllowedMallocSize) { + Report("WARNING: LeakSanitizer failed to allocate %p bytes\n", + (void*)size); + return 0; + } + void *p = allocator.Allocate(&cache, size, alignment, cleared); + RegisterAllocation(stack, p, size); + return p; +} + +void Deallocate(void *p) { + RegisterDeallocation(p); + allocator.Deallocate(&cache, p); +} + +void *Reallocate(const StackTrace &stack, void *p, uptr new_size, + uptr alignment) { + RegisterDeallocation(p); + if (new_size > kMaxAllowedMallocSize) { + Report("WARNING: LeakSanitizer failed to allocate %p bytes\n", + (void*)new_size); + allocator.Deallocate(&cache, p); + return 0; + } + p = allocator.Reallocate(&cache, p, new_size, alignment); + 
RegisterAllocation(stack, p, new_size); + return p; +} + +void GetAllocatorCacheRange(uptr *begin, uptr *end) { + *begin = (uptr)&cache; + *end = *begin + sizeof(cache); +} + +uptr GetMallocUsableSize(void *p) { + ChunkMetadata *m = Metadata(p); + if (!m) return 0; + return m->requested_size; +} + +///// Interface to the common LSan module. ///// + +void LockAllocator() { + allocator.ForceLock(); +} + +void UnlockAllocator() { + allocator.ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&allocator; + *end = *begin + sizeof(allocator); +} + +void *PointsIntoChunk(void* p) { + if (!allocator.PointerIsMine(p)) return 0; + void *chunk = allocator.GetBlockBegin(p); + if (!chunk) return 0; + // LargeMmapAllocator considers pointers to the meta-region of a chunk to be + // valid, but we don't want that. + if (p < chunk) return 0; + ChunkMetadata *m = Metadata(chunk); + CHECK(m); + if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) + return chunk; + return 0; +} + +void *GetUserBegin(void *p) { + return p; +} + +LsanMetadata::LsanMetadata(void *chunk) { + metadata_ = Metadata(chunk); + CHECK(metadata_); +} + +bool LsanMetadata::allocated() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated; +} + +ChunkTag LsanMetadata::tag() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->tag; +} + +void LsanMetadata::set_tag(ChunkTag value) { + reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value; +} + +uptr LsanMetadata::requested_size() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size; +} + +u32 LsanMetadata::stack_trace_id() const { + return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id; +} + +template<typename Callable> +void ForEachChunk(Callable const &callback) { + allocator.ForEachChunk(callback); +} + +template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>( + ProcessPlatformSpecificAllocationsCb const &callback); +template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback); +template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback); +template void ForEachChunk<MarkIndirectlyLeakedCb>( + MarkIndirectlyLeakedCb const &callback); +template void ForEachChunk<ClearTagCb>(ClearTagCb const &callback); +} // namespace __lsan diff --git a/lib/lsan/lsan_allocator.h b/lib/lsan/lsan_allocator.h new file mode 100644 index 000000000000..00c55ae02f15 --- /dev/null +++ b/lib/lsan/lsan_allocator.h @@ -0,0 +1,39 @@ +//=-- lsan_allocator.h ----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Allocator for standalone LSan. 
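A small, hedged example of what the PointsIntoChunk() logic above implies for users: any pointer into a live chunk keeps the whole block reachable, not just a pointer to its start.

#include <cstdlib>

char *interior;  // a global, so it is scanned as part of the root set

int main() {
  char *block = static_cast<char *>(malloc(16));
  interior = block + 8;  // only an interior pointer survives
  // No leak is reported: PointsIntoChunk() maps the interior pointer back to
  // the enclosing chunk, so the block is classified as reachable.
  return 0;
}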
+// +//===----------------------------------------------------------------------===// + +#ifndef LSAN_ALLOCATOR_H +#define LSAN_ALLOCATOR_H + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __lsan { + +void *Allocate(const StackTrace &stack, uptr size, uptr alignment, + bool cleared); +void Deallocate(void *p); +void *Reallocate(const StackTrace &stack, void *p, uptr new_size, + uptr alignment); +uptr GetMallocUsableSize(void *p); + +template<typename Callable> +void ForEachChunk(const Callable &callback); + +void GetAllocatorCacheRange(uptr *begin, uptr *end); +void AllocatorThreadFinish(); +void InitializeAllocator(); + +} // namespace __lsan + +#endif // LSAN_ALLOCATOR_H diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc new file mode 100644 index 000000000000..e2971e999aa6 --- /dev/null +++ b/lib/lsan/lsan_common.cc @@ -0,0 +1,393 @@ +//=-- lsan_common.cc ------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Implementation of common leak checking functionality. +// +//===----------------------------------------------------------------------===// + +#include "lsan_common.h" + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_stoptheworld.h" + +#if CAN_SANITIZE_LEAKS +namespace __lsan { + +Flags lsan_flags; + +static void InitializeFlags() { + Flags *f = flags(); + // Default values. + f->report_blocks = false; + f->resolution = 0; + f->max_leaks = 0; + f->exitcode = 23; + f->use_registers = true; + f->use_globals = true; + f->use_stacks = true; + f->use_tls = true; + f->use_unaligned = false; + f->log_pointers = false; + f->log_threads = false; + + const char *options = GetEnv("LSAN_OPTIONS"); + if (options) { + ParseFlag(options, &f->use_registers, "use_registers"); + ParseFlag(options, &f->use_globals, "use_globals"); + ParseFlag(options, &f->use_stacks, "use_stacks"); + ParseFlag(options, &f->use_tls, "use_tls"); + ParseFlag(options, &f->use_unaligned, "use_unaligned"); + ParseFlag(options, &f->report_blocks, "report_blocks"); + ParseFlag(options, &f->resolution, "resolution"); + CHECK_GE(&f->resolution, 0); + ParseFlag(options, &f->max_leaks, "max_leaks"); + CHECK_GE(&f->max_leaks, 0); + ParseFlag(options, &f->log_pointers, "log_pointers"); + ParseFlag(options, &f->log_threads, "log_threads"); + ParseFlag(options, &f->exitcode, "exitcode"); + } +} + +void InitCommonLsan() { + InitializeFlags(); + InitializePlatformSpecificModules(); +} + +static inline bool CanBeAHeapPointer(uptr p) { + // Since our heap is located in mmap-ed memory, we can assume a sensible lower + // boundary on heap addresses. + const uptr kMinAddress = 4 * 4096; + if (p < kMinAddress) return false; +#ifdef __x86_64__ + // Accept only canonical form user-space addresses. + return ((p >> 47) == 0); +#else + return true; +#endif +} + +// Scan the memory range, looking for byte patterns that point into allocator +// chunks. Mark those chunks with tag and add them to the frontier. 
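For reference, the InitializeFlags() parser above reads colon-separated key=value pairs from LSAN_OPTIONS, the same convention the lit test earlier in this patch relies on. A typical invocation might look like (flag values illustrative):

LSAN_OPTIONS="use_stacks=0:use_registers=0:report_blocks=1:max_leaks=10" ./a.out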
+// There are two usage modes for this function: finding non-leaked chunks +// (tag = kReachable) and finding indirectly leaked chunks +// (tag = kIndirectlyLeaked). In the second case, there's no flood fill, +// so frontier = 0. +void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier, + const char *region_type, ChunkTag tag) { + const uptr alignment = flags()->pointer_alignment(); + if (flags()->log_pointers) + Report("Scanning %s range %p-%p.\n", region_type, begin, end); + uptr pp = begin; + if (pp % alignment) + pp = pp + alignment - pp % alignment; + for (; pp + sizeof(uptr) <= end; pp += alignment) { + void *p = *reinterpret_cast<void**>(pp); + if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue; + // FIXME: PointsIntoChunk is SLOW because GetBlockBegin() in + // LargeMmapAllocator involves a lock and a linear search. + void *chunk = PointsIntoChunk(p); + if (!chunk) continue; + LsanMetadata m(chunk); + if (m.tag() == kReachable) continue; + m.set_tag(tag); + if (flags()->log_pointers) + Report("%p: found %p pointing into chunk %p-%p of size %llu.\n", pp, p, + chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(), + m.requested_size()); + if (frontier) + frontier->push_back(reinterpret_cast<uptr>(chunk)); + } +} + +// Scan thread data (stacks and TLS) for heap pointers. +static void ProcessThreads(SuspendedThreadsList const &suspended_threads, + InternalVector<uptr> *frontier) { + InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount()); + uptr registers_begin = reinterpret_cast<uptr>(registers.data()); + uptr registers_end = registers_begin + registers.size(); + for (uptr i = 0; i < suspended_threads.thread_count(); i++) { + uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i)); + if (flags()->log_threads) Report("Processing thread %d.\n", os_id); + uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end; + bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end, + &tls_begin, &tls_end, + &cache_begin, &cache_end); + if (!thread_found) { + // If a thread can't be found in the thread registry, it's probably in the + // process of destruction. Log this event and move on. + if (flags()->log_threads) + Report("Thread %d not found in registry.\n", os_id); + continue; + } + uptr sp; + bool have_registers = + (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0); + if (!have_registers) { + Report("Unable to get registers from thread %d.\n"); + // If unable to get SP, consider the entire stack to be reachable. + sp = stack_begin; + } + + if (flags()->use_registers && have_registers) + ScanRangeForPointers(registers_begin, registers_end, frontier, + "REGISTERS", kReachable); + + if (flags()->use_stacks) { + if (flags()->log_threads) + Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp); + if (sp < stack_begin || sp >= stack_end) { + // SP is outside the recorded stack range (e.g. the thread is running a + // signal handler on alternate stack). Again, consider the entire stack + // range to be reachable. + if (flags()->log_threads) + Report("WARNING: stack_pointer not in stack_range.\n"); + } else { + // Shrink the stack range to ignore out-of-scope values. 
+ stack_begin = sp; + } + ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", + kReachable); + } + + if (flags()->use_tls) { + if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end); + if (cache_begin == cache_end) { + ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable); + } else { + // Because LSan should not be loaded with dlopen(), we can assume + // that allocator cache will be part of static TLS image. + CHECK_LE(tls_begin, cache_begin); + CHECK_GE(tls_end, cache_end); + if (tls_begin < cache_begin) + ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS", + kReachable); + if (tls_end > cache_end) + ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable); + } + } + } +} + +static void FloodFillReachable(InternalVector<uptr> *frontier) { + while (frontier->size()) { + uptr next_chunk = frontier->back(); + frontier->pop_back(); + LsanMetadata m(reinterpret_cast<void *>(next_chunk)); + ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier, + "HEAP", kReachable); + } +} + +// Mark leaked chunks which are reachable from other leaked chunks. +void MarkIndirectlyLeakedCb::operator()(void *p) const { + p = GetUserBegin(p); + LsanMetadata m(p); + if (m.allocated() && m.tag() != kReachable) { + ScanRangeForPointers(reinterpret_cast<uptr>(p), + reinterpret_cast<uptr>(p) + m.requested_size(), + /* frontier */ 0, "HEAP", kIndirectlyLeaked); + } +} + +// Set the appropriate tag on each chunk. +static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { + // Holds the flood fill frontier. + InternalVector<uptr> frontier(GetPageSizeCached()); + + if (flags()->use_globals) + ProcessGlobalRegions(&frontier); + ProcessThreads(suspended_threads, &frontier); + FloodFillReachable(&frontier); + ProcessPlatformSpecificAllocations(&frontier); + FloodFillReachable(&frontier); + + // Now all reachable chunks are marked. Iterate over leaked chunks and mark + // those that are reachable from other leaked chunks. + if (flags()->log_pointers) + Report("Now scanning leaked blocks for pointers.\n"); + ForEachChunk(MarkIndirectlyLeakedCb()); +} + +void ClearTagCb::operator()(void *p) const { + p = GetUserBegin(p); + LsanMetadata m(p); + m.set_tag(kDirectlyLeaked); +} + +static void PrintStackTraceById(u32 stack_trace_id) { + CHECK(stack_trace_id); + uptr size = 0; + const uptr *trace = StackDepotGet(stack_trace_id, &size); + StackTrace::PrintStack(trace, size, common_flags()->symbolize, + common_flags()->strip_path_prefix, 0); +} + +static void LockAndSuspendThreads(StopTheWorldCallback callback, void *arg) { + LockThreadRegistry(); + LockAllocator(); + StopTheWorld(callback, arg); + // Allocator must be unlocked by the callback. + UnlockThreadRegistry(); +} + +///// Normal leak checking. 
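ClassifyAllChunks() above drives the marking in flood-fill passes over a frontier of chunk addresses. The worklist shape of that loop, reduced to a toy graph (types and names here are simplified stand-ins, not the runtime's):

#include <vector>

struct Chunk {
  bool reachable = false;
  std::vector<int> points_to;  // indices of chunks this one references
};

// Mirrors FloodFillReachable(): pop a chunk, scan what it references, mark
// newly reached chunks and push them back onto the frontier.
static void FloodFill(std::vector<Chunk> &chunks, std::vector<int> frontier) {
  while (!frontier.empty()) {
    int next = frontier.back();
    frontier.pop_back();
    for (int succ : chunks[next].points_to) {
      if (chunks[succ].reachable) continue;
      chunks[succ].reachable = true;
      frontier.push_back(succ);
    }
  }
}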
///// + +void CollectLeaksCb::operator()(void *p) const { + p = GetUserBegin(p); + LsanMetadata m(p); + if (!m.allocated()) return; + if (m.tag() != kReachable) { + uptr resolution = flags()->resolution; + if (resolution > 0) { + uptr size = 0; + const uptr *trace = StackDepotGet(m.stack_trace_id(), &size); + size = Min(size, resolution); + leak_report_->Add(StackDepotPut(trace, size), m.requested_size(), + m.tag()); + } else { + leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag()); + } + } +} + +static void CollectLeaks(LeakReport *leak_report) { + ForEachChunk(CollectLeaksCb(leak_report)); +} + +void PrintLeakedCb::operator()(void *p) const { + p = GetUserBegin(p); + LsanMetadata m(p); + if (!m.allocated()) return; + if (m.tag() != kReachable) { + CHECK(m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked); + Printf("%s leaked %llu byte block at %p\n", + m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly", + m.requested_size(), p); + } +} + +static void PrintLeaked() { + Printf("Reporting individual blocks:\n"); + Printf("============================\n"); + ForEachChunk(PrintLeakedCb()); + Printf("\n"); +} + +enum LeakCheckResult { + kFatalError, + kLeaksFound, + kNoLeaks +}; + +static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads, + void *arg) { + LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg); + CHECK_EQ(*result, kFatalError); + // Allocator must not be locked when we call GetRegionBegin(). + UnlockAllocator(); + ClassifyAllChunks(suspended_threads); + LeakReport leak_report; + CollectLeaks(&leak_report); + if (leak_report.IsEmpty()) { + *result = kNoLeaks; + return; + } + Printf("\n"); + Printf("=================================================================\n"); + Report("ERROR: LeakSanitizer: detected memory leaks\n"); + leak_report.PrintLargest(flags()->max_leaks); + if (flags()->report_blocks) + PrintLeaked(); + leak_report.PrintSummary(); + Printf("\n"); + ForEachChunk(ClearTagCb()); + *result = kLeaksFound; +} + +void DoLeakCheck() { + LeakCheckResult result = kFatalError; + LockAndSuspendThreads(DoLeakCheckCallback, &result); + if (result == kFatalError) { + Report("LeakSanitizer has encountered a fatal error.\n"); + Die(); + } else if (result == kLeaksFound) { + if (flags()->exitcode) + internal__exit(flags()->exitcode); + } +} + +///// LeakReport implementation. ///// + +// A hard limit on the number of distinct leaks, to avoid quadratic complexity +// in LeakReport::Add(). We don't expect to ever see this many leaks in +// real-world applications. +// FIXME: Get rid of this limit by changing the implementation of LeakReport to +// use a hash table. 
+const uptr kMaxLeaksConsidered = 1000; + +void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) { + CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked); + bool is_directly_leaked = (tag == kDirectlyLeaked); + for (uptr i = 0; i < leaks_.size(); i++) + if (leaks_[i].stack_trace_id == stack_trace_id && + leaks_[i].is_directly_leaked == is_directly_leaked) { + leaks_[i].hit_count++; + leaks_[i].total_size += leaked_size; + return; + } + if (leaks_.size() == kMaxLeaksConsidered) return; + Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id, + is_directly_leaked }; + leaks_.push_back(leak); +} + +static bool IsLarger(const Leak &leak1, const Leak &leak2) { + return leak1.total_size > leak2.total_size; +} + +void LeakReport::PrintLargest(uptr max_leaks) { + CHECK(leaks_.size() <= kMaxLeaksConsidered); + Printf("\n"); + if (leaks_.size() == kMaxLeaksConsidered) + Printf("Too many leaks! Only the first %llu leaks encountered will be " + "reported.\n", + kMaxLeaksConsidered); + if (max_leaks > 0 && max_leaks < leaks_.size()) + Printf("The %llu largest leak(s):\n", max_leaks); + InternalSort(&leaks_, leaks_.size(), IsLarger); + max_leaks = max_leaks > 0 ? Min(max_leaks, leaks_.size()) : leaks_.size(); + for (uptr i = 0; i < max_leaks; i++) { + Printf("%s leak of %llu byte(s) in %llu object(s) allocated from:\n", + leaks_[i].is_directly_leaked ? "Direct" : "Indirect", + leaks_[i].total_size, leaks_[i].hit_count); + PrintStackTraceById(leaks_[i].stack_trace_id); + Printf("\n"); + } + if (max_leaks < leaks_.size()) { + uptr remaining = leaks_.size() - max_leaks; + Printf("Omitting %llu more leak(s).\n", remaining); + } +} + +void LeakReport::PrintSummary() { + CHECK(leaks_.size() <= kMaxLeaksConsidered); + uptr bytes = 0, allocations = 0; + for (uptr i = 0; i < leaks_.size(); i++) { + bytes += leaks_[i].total_size; + allocations += leaks_[i].hit_count; + } + Printf("SUMMARY: LeakSanitizer: %llu byte(s) leaked in %llu allocation(s).\n", + bytes, allocations); +} +} // namespace __lsan +#endif // CAN_SANITIZE_LEAKS diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h new file mode 100644 index 000000000000..8cb4b2753cd9 --- /dev/null +++ b/lib/lsan/lsan_common.h @@ -0,0 +1,186 @@ +//=-- lsan_common.h -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Private LSan header. +// +//===----------------------------------------------------------------------===// + +#ifndef LSAN_COMMON_H +#define LSAN_COMMON_H + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_platform.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +#if SANITIZER_LINUX && defined(__x86_64__) +#define CAN_SANITIZE_LEAKS 1 +#else +#define CAN_SANITIZE_LEAKS 0 +#endif + +namespace __lsan { + +// Chunk tags. +enum ChunkTag { + kDirectlyLeaked = 0, // default + kIndirectlyLeaked = 1, + kReachable = 2 +}; + +struct Flags { + uptr pointer_alignment() const { + return use_unaligned ? 1 : sizeof(uptr); + } + + // Print addresses of leaked blocks after main leak report. + bool report_blocks; + // Aggregate two blocks into one leak if this many stack frames match. If + // zero, the entire stack trace must match. 
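Read together with the lit test earlier in the patch, the format strings in PrintLargest() and PrintSummary() above imply a report roughly of this shape for a single 1337-byte leak (the call stack itself is emitted by PrintStackTraceById and depends on the symbolizer):

Direct leak of 1337 byte(s) in 1 object(s) allocated from:
    <call stack printed by PrintStackTraceById>

SUMMARY: LeakSanitizer: 1337 byte(s) leaked in 1 allocation(s).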
+ int resolution; + // The number of leaks reported. + int max_leaks; + // If nonzero kill the process with this exit code upon finding leaks. + int exitcode; + + // Flags controlling the root set of reachable memory. + // Global variables (.data and .bss). + bool use_globals; + // Thread stacks. + bool use_stacks; + // Thread registers. + bool use_registers; + // TLS and thread-specific storage. + bool use_tls; + + // Consider unaligned pointers valid. + bool use_unaligned; + + // Debug logging. + bool log_pointers; + bool log_threads; +}; + +extern Flags lsan_flags; +inline Flags *flags() { return &lsan_flags; } + +void InitCommonLsan(); +// Testing interface. Find leaked chunks and dump their addresses to vector. +void ReportLeaked(InternalVector<void *> *leaked, uptr sources); +// Normal leak check. Find leaks and print a report according to flags. +void DoLeakCheck(); + +struct Leak { + uptr hit_count; + uptr total_size; + u32 stack_trace_id; + bool is_directly_leaked; +}; + +// Aggregates leaks by stack trace prefix. +class LeakReport { + public: + LeakReport() : leaks_(1) {} + void Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag); + void PrintLargest(uptr max_leaks); + void PrintSummary(); + bool IsEmpty() { return leaks_.size() == 0; } + private: + InternalVector<Leak> leaks_; +}; + +// Platform-specific functions. +void InitializePlatformSpecificModules(); +void ProcessGlobalRegions(InternalVector<uptr> *frontier); +void ProcessPlatformSpecificAllocations(InternalVector<uptr> *frontier); + +void ScanRangeForPointers(uptr begin, uptr end, InternalVector<uptr> *frontier, + const char *region_type, ChunkTag tag); + +// Callables for iterating over chunks. Those classes are used as template +// parameters in ForEachChunk, so we must expose them here to allow for explicit +// template instantiation. + +// Identifies unreachable chunks which must be treated as reachable. Marks them +// as reachable and adds them to the frontier. +class ProcessPlatformSpecificAllocationsCb { + public: + explicit ProcessPlatformSpecificAllocationsCb(InternalVector<uptr> *frontier) + : frontier_(frontier) {} + void operator()(void *p) const; + private: + InternalVector<uptr> *frontier_; +}; + +// Prints addresses of unreachable chunks. +class PrintLeakedCb { + public: + void operator()(void *p) const; +}; + +// Aggregates unreachable chunks into a LeakReport. +class CollectLeaksCb { + public: + explicit CollectLeaksCb(LeakReport *leak_report) + : leak_report_(leak_report) {} + void operator()(void *p) const; + private: + LeakReport *leak_report_; +}; + +// Resets each chunk's tag to default (kDirectlyLeaked). +class ClearTagCb { + public: + void operator()(void *p) const; +}; + +// Scans each leaked chunk for pointers to other leaked chunks, and marks each +// of them as indirectly leaked. +class MarkIndirectlyLeakedCb { + public: + void operator()(void *p) const; +}; + +// The following must be implemented in the parent tool. + +template<typename Callable> void ForEachChunk(Callable const &callback); +// The address range occupied by the global allocator object. +void GetAllocatorGlobalRange(uptr *begin, uptr *end); +// Wrappers for allocator's ForceLock()/ForceUnlock(). +void LockAllocator(); +void UnlockAllocator(); +// Wrappers for ThreadRegistry access. 
+void LockThreadRegistry(); +void UnlockThreadRegistry(); +bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, + uptr *cache_begin, uptr *cache_end); +// If p points into a chunk that has been allocated to the user, return its +// user-visible address. Otherwise, return 0. +void *PointsIntoChunk(void *p); +// Return address of user-visible chunk contained in this allocator chunk. +void *GetUserBegin(void *p); +// Wrapper for chunk metadata operations. +class LsanMetadata { + public: + // Constructor accepts pointer to user-visible chunk. + explicit LsanMetadata(void *chunk); + bool allocated() const; + ChunkTag tag() const; + void set_tag(ChunkTag value); + uptr requested_size() const; + u32 stack_trace_id() const; + private: + void *metadata_; +}; + +} // namespace __lsan + +#endif // LSAN_COMMON_H diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc new file mode 100644 index 000000000000..10a434b5f851 --- /dev/null +++ b/lib/lsan/lsan_common_linux.cc @@ -0,0 +1,123 @@ +//=-- lsan_common_linux.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Implementation of common leak checking functionality. Linux-specific code. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#include "lsan_common.h" + +#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX +#include <link.h> + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_stackdepot.h" + +namespace __lsan { + +static const char kLinkerName[] = "ld"; +// We request 2 modules matching "ld", so we can print a warning if there's more +// than one match. But only the first one is actually used. +static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64); +static LoadedModule *linker = 0; + +static bool IsLinker(const char* full_name) { + return LibraryNameIs(full_name, kLinkerName); +} + +void InitializePlatformSpecificModules() { + internal_memset(linker_placeholder, 0, sizeof(linker_placeholder)); + uptr num_matches = GetListOfModules( + reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker); + if (num_matches == 1) { + linker = reinterpret_cast<LoadedModule *>(linker_placeholder); + return; + } + if (num_matches == 0) + Report("LeakSanitizer: Dynamic linker not found. " + "TLS will not be handled correctly.\n"); + else if (num_matches > 1) + Report("LeakSanitizer: Multiple modules match \"%s\". " + "TLS will not be handled correctly.\n", kLinkerName); + linker = 0; +} + +static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size, + void *data) { + InternalVector<uptr> *frontier = + reinterpret_cast<InternalVector<uptr> *>(data); + for (uptr j = 0; j < info->dlpi_phnum; j++) { + const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]); + // We're looking for .data and .bss sections, which reside in writeable, + // loadable segments. 
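ProcessGlobalRegionsCallback here follows the standard dl_iterate_phdr pattern for walking every loaded module's program headers. A standalone sketch of that pattern (illustrative only, Linux/glibc, error handling omitted):

#include <link.h>
#include <cstdint>
#include <cstdio>

// Called once per loaded module; print its writable PT_LOAD segments,
// which is where .data and .bss live.
static int PrintWritableSegments(struct dl_phdr_info *info, size_t, void *) {
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const ElfW(Phdr) *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type != PT_LOAD || !(phdr->p_flags & PF_W) ||
        phdr->p_memsz == 0)
      continue;
    uintptr_t begin = info->dlpi_addr + phdr->p_vaddr;
    printf("%s: writable segment %p-%p\n", info->dlpi_name,
           (void *)begin, (void *)(begin + phdr->p_memsz));
  }
  return 0;  // keep iterating over the remaining modules
}

int main() {
  dl_iterate_phdr(PrintWritableSegments, nullptr);
  return 0;
}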
+ if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) || + (phdr->p_memsz == 0)) + continue; + uptr begin = info->dlpi_addr + phdr->p_vaddr; + uptr end = begin + phdr->p_memsz; + uptr allocator_begin = 0, allocator_end = 0; + GetAllocatorGlobalRange(&allocator_begin, &allocator_end); + if (begin <= allocator_begin && allocator_begin < end) { + CHECK_LE(allocator_begin, allocator_end); + CHECK_LT(allocator_end, end); + if (begin < allocator_begin) + ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL", + kReachable); + if (allocator_end < end) + ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", + kReachable); + } else { + ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable); + } + } + return 0; +} + +// Scan global variables for heap pointers. +void ProcessGlobalRegions(InternalVector<uptr> *frontier) { + // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of + // deadlocking by running this under StopTheWorld. However, the lock is + // reentrant, so we should be able to fix this by acquiring the lock before + // suspending threads. + dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier); +} + +static uptr GetCallerPC(u32 stack_id) { + CHECK(stack_id); + uptr size = 0; + const uptr *trace = StackDepotGet(stack_id, &size); + // The top frame is our malloc/calloc/etc. The next frame is the caller. + CHECK_GE(size, 2); + return trace[1]; +} + +void ProcessPlatformSpecificAllocationsCb::operator()(void *p) const { + p = GetUserBegin(p); + LsanMetadata m(p); + if (m.allocated() && m.tag() != kReachable) { + if (linker->containsAddress(GetCallerPC(m.stack_trace_id()))) { + m.set_tag(kReachable); + frontier_->push_back(reinterpret_cast<uptr>(p)); + } + } +} + +// Handle dynamically allocated TLS blocks by treating all chunks allocated from +// ld-linux.so as reachable. +void ProcessPlatformSpecificAllocations(InternalVector<uptr> *frontier) { + if (!flags()->use_tls) return; + if (!linker) return; + ForEachChunk(ProcessPlatformSpecificAllocationsCb(frontier)); +} + +} // namespace __lsan +#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc new file mode 100644 index 000000000000..b2eb6e310229 --- /dev/null +++ b/lib/lsan/lsan_interceptors.cc @@ -0,0 +1,252 @@ +//=-- lsan_interceptors.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Interceptors for standalone LSan. 
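+// The malloc-family interceptors register every allocation (with its stack
+// trace) in the LSan allocator, and the pthread interceptors keep the
+// standalone thread registry in sync.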
+// +//===----------------------------------------------------------------------===// + +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_platform_limits_posix.h" +#include "lsan.h" +#include "lsan_allocator.h" +#include "lsan_thread.h" + +using namespace __lsan; + +extern "C" { +int pthread_attr_init(void *attr); +int pthread_attr_destroy(void *attr); +int pthread_attr_getdetachstate(void *attr, int *v); +int pthread_key_create(unsigned *key, void (*destructor)(void* v)); +int pthread_setspecific(unsigned key, const void *v); +} + +#define GET_STACK_TRACE \ + StackTrace stack; \ + { \ + uptr stack_top = 0, stack_bottom = 0; \ + ThreadContext *t; \ + bool fast = common_flags()->fast_unwind_on_malloc; \ + if (fast && (t = CurrentThreadContext())) { \ + stack_top = t->stack_end(); \ + stack_bottom = t->stack_begin(); \ + } \ + GetStackTrace(&stack, __sanitizer::common_flags()->malloc_context_size, \ + StackTrace::GetCurrentPc(), \ + GET_CURRENT_FRAME(), stack_top, stack_bottom, fast); \ + } + +///// Malloc/free interceptors. ///// + +namespace std { + struct nothrow_t; +} + +INTERCEPTOR(void*, malloc, uptr size) { + Init(); + GET_STACK_TRACE; + return Allocate(stack, size, 1, false); +} + +INTERCEPTOR(void, free, void *p) { + Init(); + Deallocate(p); +} + +INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { + if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0; + Init(); + GET_STACK_TRACE; + size *= nmemb; + return Allocate(stack, size, 1, true); +} + +INTERCEPTOR(void*, realloc, void *q, uptr size) { + Init(); + GET_STACK_TRACE; + return Reallocate(stack, q, size, 1); +} + +INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { + Init(); + GET_STACK_TRACE; + return Allocate(stack, size, alignment, false); +} + +INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { + Init(); + GET_STACK_TRACE; + *memptr = Allocate(stack, size, alignment, false); + // FIXME: Return ENOMEM if user requested more than max alloc size. 
+ return 0; +} + +INTERCEPTOR(void*, valloc, uptr size) { + Init(); + GET_STACK_TRACE; + if (size == 0) + size = GetPageSizeCached(); + return Allocate(stack, size, GetPageSizeCached(), false); +} + +INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { + Init(); + return GetMallocUsableSize(ptr); +} + +struct fake_mallinfo { + int x[10]; +}; + +INTERCEPTOR(struct fake_mallinfo, mallinfo, void) { + struct fake_mallinfo res; + internal_memset(&res, 0, sizeof(res)); + return res; +} + +INTERCEPTOR(int, mallopt, int cmd, int value) { + return -1; +} + +void *operator new(uptr size) ALIAS("malloc") SANITIZER_INTERFACE_ATTRIBUTE; +void *operator new[](uptr size) ALIAS("malloc") SANITIZER_INTERFACE_ATTRIBUTE; +void *operator new(uptr size, std::nothrow_t const&) ALIAS("malloc") + SANITIZER_INTERFACE_ATTRIBUTE; +void *operator new[](uptr size, std::nothrow_t const&) ALIAS("malloc") + SANITIZER_INTERFACE_ATTRIBUTE; +void operator delete(void *ptr) ALIAS("free") SANITIZER_INTERFACE_ATTRIBUTE; +void operator delete[](void *ptr) ALIAS("free") SANITIZER_INTERFACE_ATTRIBUTE; +void operator delete(void *ptr, std::nothrow_t const&) ALIAS("free") + SANITIZER_INTERFACE_ATTRIBUTE; +void operator delete[](void *ptr, std::nothrow_t const&) ALIAS("free") + SANITIZER_INTERFACE_ATTRIBUTE; + +extern "C" { +void cfree(void *p) ALIAS("free") SANITIZER_INTERFACE_ATTRIBUTE; +void *pvalloc(uptr size) ALIAS("valloc") + SANITIZER_INTERFACE_ATTRIBUTE; +// We need this to intercept the __libc_memalign calls that are used to +// allocate dynamic TLS space in ld-linux.so. +void *__libc_memalign(uptr alignment, uptr size) + ALIAS("memalign") SANITIZER_INTERFACE_ATTRIBUTE; +} + +///// Thread initialization and finalization. ///// + +static unsigned g_thread_finalize_key; + +static void thread_finalize(void *v) { + uptr iter = (uptr)v; + if (iter > 1) { + if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) { + Report("LeakSanitizer: failed to set thread key.\n"); + Die(); + } + return; + } + ThreadFinish(); +} + +struct ThreadParam { + void *(*callback)(void *arg); + void *param; + atomic_uintptr_t tid; +}; + +// PTHREAD_DESTRUCTOR_ITERATIONS from glibc. +const uptr kPthreadDestructorIterations = 4; + +extern "C" void *__lsan_thread_start_func(void *arg) { + ThreadParam *p = (ThreadParam*)arg; + void* (*callback)(void *arg) = p->callback; + void *param = p->param; + // Wait until the last iteration to maximize the chance that we are the last + // destructor to run. 
+ if (pthread_setspecific(g_thread_finalize_key, + (void*)kPthreadDestructorIterations)) { + Report("LeakSanitizer: failed to set thread key.\n"); + Die(); + } + int tid = 0; + while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) + internal_sched_yield(); + atomic_store(&p->tid, 0, memory_order_release); + SetCurrentThread(tid); + ThreadStart(tid, GetTid()); + return callback(param); +} + +INTERCEPTOR(int, pthread_create, void *th, void *attr, + void *(*callback)(void *), void *param) { + Init(); + __sanitizer_pthread_attr_t myattr; + if (attr == 0) { + pthread_attr_init(&myattr); + attr = &myattr; + } + AdjustStackSizeLinux(attr, 0); + int detached = 0; + pthread_attr_getdetachstate(attr, &detached); + ThreadParam p; + p.callback = callback; + p.param = param; + atomic_store(&p.tid, 0, memory_order_relaxed); + int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p); + if (res == 0) { + int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached); + CHECK_NE(tid, 0); + atomic_store(&p.tid, tid, memory_order_release); + while (atomic_load(&p.tid, memory_order_acquire) != 0) + internal_sched_yield(); + } + if (attr == &myattr) + pthread_attr_destroy(&myattr); + return res; +} + +INTERCEPTOR(int, pthread_join, void *th, void **ret) { + Init(); + int tid = ThreadTid((uptr)th); + int res = REAL(pthread_join)(th, ret); + if (res == 0) + ThreadJoin(tid); + return res; +} + +namespace __lsan { + +void InitializeInterceptors() { + INTERCEPT_FUNCTION(malloc); + INTERCEPT_FUNCTION(free); + INTERCEPT_FUNCTION(calloc); + INTERCEPT_FUNCTION(realloc); + INTERCEPT_FUNCTION(memalign); + INTERCEPT_FUNCTION(posix_memalign); + INTERCEPT_FUNCTION(memalign); + INTERCEPT_FUNCTION(valloc); + INTERCEPT_FUNCTION(malloc_usable_size); + INTERCEPT_FUNCTION(mallinfo); + INTERCEPT_FUNCTION(mallopt); + INTERCEPT_FUNCTION(pthread_create); + INTERCEPT_FUNCTION(pthread_join); + + if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { + Report("LeakSanitizer: failed to create thread key.\n"); + Die(); + } +} + +} // namespace __lsan diff --git a/lib/lsan/lsan_thread.cc b/lib/lsan/lsan_thread.cc new file mode 100644 index 000000000000..3e28dee1325a --- /dev/null +++ b/lib/lsan/lsan_thread.cc @@ -0,0 +1,151 @@ +//=-- lsan_thread.cc ------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// See lsan_thread.h for details. 
+// +//===----------------------------------------------------------------------===// + +#include "lsan_thread.h" + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_thread_registry.h" +#include "lsan_allocator.h" + +namespace __lsan { + +const u32 kInvalidTid = (u32) -1; + +static ThreadRegistry *thread_registry; +static THREADLOCAL u32 current_thread_tid = kInvalidTid; + +static ThreadContextBase *CreateThreadContext(u32 tid) { + void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext"); + return new(mem) ThreadContext(tid); +} + +static const uptr kMaxThreads = 1 << 13; +static const uptr kThreadQuarantineSize = 64; + +void InitializeThreadRegistry() { + static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64); + thread_registry = new(thread_registry_placeholder) + ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize); +} + +u32 GetCurrentThread() { + return current_thread_tid; +} + +void SetCurrentThread(u32 tid) { + current_thread_tid = tid; +} + +ThreadContext::ThreadContext(int tid) + : ThreadContextBase(tid), + stack_begin_(0), + stack_end_(0), + cache_begin_(0), + cache_end_(0), + tls_begin_(0), + tls_end_(0) {} + +struct OnStartedArgs { + uptr stack_begin, stack_end, + cache_begin, cache_end, + tls_begin, tls_end; +}; + +void ThreadContext::OnStarted(void *arg) { + OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg); + stack_begin_ = args->stack_begin; + stack_end_ = args->stack_end; + tls_begin_ = args->tls_begin; + tls_end_ = args->tls_end; + cache_begin_ = args->cache_begin; + cache_end_ = args->cache_end; +} + +void ThreadContext::OnFinished() { + AllocatorThreadFinish(); +} + +u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) { + return thread_registry->CreateThread(user_id, detached, parent_tid, + /* arg */ 0); +} + +void ThreadStart(u32 tid, uptr os_id) { + OnStartedArgs args; + uptr stack_size = 0; + uptr tls_size = 0; + GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size, + &args.tls_begin, &tls_size); + args.stack_end = args.stack_begin + stack_size; + args.tls_end = args.tls_begin + tls_size; + GetAllocatorCacheRange(&args.cache_begin, &args.cache_end); + thread_registry->StartThread(tid, os_id, &args); +} + +void ThreadFinish() { + thread_registry->FinishThread(GetCurrentThread()); +} + +ThreadContext *CurrentThreadContext() { + if (!thread_registry) return 0; + if (GetCurrentThread() == kInvalidTid) + return 0; + // No lock needed when getting current thread. + return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread()); +} + +static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { + uptr uid = (uptr)arg; + if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { + return true; + } + return false; +} + +u32 ThreadTid(uptr uid) { + return thread_registry->FindThread(FindThreadByUid, (void*)uid); +} + +void ThreadJoin(u32 tid) { + CHECK_NE(tid, kInvalidTid); + thread_registry->JoinThread(tid, /* arg */0); +} + +///// Interface to the common LSan module. 
///// + +bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end, + uptr *tls_begin, uptr *tls_end, + uptr *cache_begin, uptr *cache_end) { + ThreadContext *context = static_cast<ThreadContext *>( + thread_registry->FindThreadContextByOsIDLocked(os_id)); + if (!context) return false; + *stack_begin = context->stack_begin(); + *stack_end = context->stack_end(); + *tls_begin = context->tls_begin(); + *tls_end = context->tls_end(); + *cache_begin = context->cache_begin(); + *cache_end = context->cache_end(); + return true; +} + +void LockThreadRegistry() { + thread_registry->Lock(); +} + +void UnlockThreadRegistry() { + thread_registry->Unlock(); +} + +} // namespace __lsan diff --git a/lib/lsan/lsan_thread.h b/lib/lsan/lsan_thread.h new file mode 100644 index 000000000000..b62f04b8eb74 --- /dev/null +++ b/lib/lsan/lsan_thread.h @@ -0,0 +1,53 @@ +//=-- lsan_thread.h -------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Thread registry for standalone LSan. +// +//===----------------------------------------------------------------------===// + +#ifndef LSAN_THREAD_H +#define LSAN_THREAD_H + +#include "sanitizer_common/sanitizer_thread_registry.h" + +namespace __lsan { + +class ThreadContext : public ThreadContextBase { + public: + explicit ThreadContext(int tid); + void OnStarted(void *arg); + void OnFinished(); + uptr stack_begin() { return stack_begin_; } + uptr stack_end() { return stack_end_; } + uptr tls_begin() { return tls_begin_; } + uptr tls_end() { return tls_end_; } + uptr cache_begin() { return cache_begin_; } + uptr cache_end() { return cache_end_; } + private: + uptr stack_begin_, stack_end_, + cache_begin_, cache_end_, + tls_begin_, tls_end_; +}; + +void InitializeThreadRegistry(); + +void ThreadStart(u32 tid, uptr os_id); +void ThreadFinish(); +u32 ThreadCreate(u32 tid, uptr uid, bool detached); +void ThreadJoin(u32 tid); +u32 ThreadTid(uptr uid); + +u32 GetCurrentThread(); +void SetCurrentThread(u32 tid); +ThreadContext *CurrentThreadContext(); + +} // namespace __lsan + +#endif // LSAN_THREAD_H diff --git a/lib/lsan/tests/CMakeLists.txt b/lib/lsan/tests/CMakeLists.txt new file mode 100644 index 000000000000..3d97ae96f650 --- /dev/null +++ b/lib/lsan/tests/CMakeLists.txt @@ -0,0 +1,51 @@ +include(CheckCXXCompilerFlag) +include(CompilerRTCompile) +include(CompilerRTLink) + +include_directories(..) +include_directories(../..) + +set(LSAN_TESTS_SRC + lsan_dummy_unittest.cc) + +set(LSAN_TESTS_CFLAGS + ${LSAN_CFLAGS} + ${COMPILER_RT_GTEST_INCLUDE_CFLAGS} + -I${COMPILER_RT_SOURCE_DIR}/lib) + +add_custom_target(LsanTests) +set_target_properties(LsanTests PROPERTIES + FOLDER "LSan unittests") + +# Compile source for the given architecture, using compiler +# options in ${ARGN}, and add it to the object list. 
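+# E.g. add_lsan_tests_for_arch() below expands this to roughly:
+#   lsan_compile(LSAN_TESTS_OBJ ${LSAN_TESTS_SRC} x86_64 ${LSAN_TESTS_CFLAGS} -I${LSAN_SRC_DIR})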
+macro(lsan_compile obj_list source arch) + get_filename_component(basename ${source} NAME) + set(output_obj "${basename}.${arch}.o") + get_target_flags_for_arch(${arch} TARGET_CFLAGS) + clang_compile(${output_obj} ${source} + CFLAGS ${ARGN} ${TARGET_CFLAGS} + DEPS gtest) + list(APPEND ${obj_list} ${output_obj}) +endmacro() + +function(add_lsan_test testname arch) + set(testname_arch ${testname}-${arch}-Test) + get_target_flags_for_arch(${arch} TARGET_LINKFLAGS) + add_unittest(LsanTests ${testname_arch} ${ARGN}) + target_link_libraries(${testname_arch} "clang_rt.lsan-${arch}") + set_target_compile_flags(${testname_arch} ${LSAN_TESTS_CFLAGS}) + set_target_link_flags(${testname_arch} ${TARGET_LINKFLAGS}) +endfunction() + +macro(add_lsan_tests_for_arch arch) + set(LSAN_TESTS_OBJ) + lsan_compile(LSAN_TESTS_OBJ ${LSAN_TESTS_SRC} ${arch} ${LSAN_TESTS_CFLAGS} + -I${LSAN_SRC_DIR}) + add_lsan_test(Lsan ${arch} ${LSAN_TESTS_OBJ}) +endmacro() + +# Build tests for 64-bit Linux only. +if(UNIX AND NOT APPLE AND CAN_TARGET_x86_64) + add_lsan_tests_for_arch(x86_64) +endif() diff --git a/lib/lsan/tests/lsan_dummy_unittest.cc b/lib/lsan/tests/lsan_dummy_unittest.cc new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/lib/lsan/tests/lsan_dummy_unittest.cc diff --git a/lib/lsan/tests/lsan_testlib.cc b/lib/lsan/tests/lsan_testlib.cc new file mode 100644 index 000000000000..363cc14f1941 --- /dev/null +++ b/lib/lsan/tests/lsan_testlib.cc @@ -0,0 +1,25 @@ +//===-- lsan_testlib.cc ---------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of LeakSanitizer. +// Standalone LSan tool as a shared library, to be used with LD_PRELOAD. +// +//===----------------------------------------------------------------------===// +/* Usage: +clang++ ../sanitizer_common/sanitizer_*.cc ../interception/interception_*.cc \ + lsan*.cc tests/lsan_testlib.cc -I. -I.. -g -ldl -lpthread -fPIC -shared -O2 \ + -o lsan.so +LD_PRELOAD=./lsan.so /your/app +*/ +#include "lsan.h" + +__attribute__((constructor)) +void constructor() { + __lsan::Init(); +} diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt index bb8dbccbeba6..0671b59c0025 100644 --- a/lib/msan/CMakeLists.txt +++ b/lib/msan/CMakeLists.txt @@ -7,29 +7,33 @@ set(MSAN_RTL_SOURCES msan_interceptors.cc msan_linux.cc msan_new_delete.cc - msan_platform_limits_posix.cc msan_report.cc ) set(MSAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS} + -fno-rtti -fPIE # Prevent clang from generating libc calls. -ffreestanding) # Static runtime library. 
set(MSAN_RUNTIME_LIBRARIES) -add_library(clang_rt.msan-x86_64 STATIC - ${MSAN_RTL_SOURCES} - $<TARGET_OBJECTS:RTInterception.x86_64> - $<TARGET_OBJECTS:RTSanitizerCommon.x86_64> - ) -set_target_compile_flags(clang_rt.msan-x86_64 - ${MSAN_RTL_CFLAGS} ${TARGET_x86_64_CFLAGS} - ) -list(APPEND MSAN_RUNTIME_LIBRARIES clang_rt.msan-x86_64) +set(arch "x86_64") +if(CAN_TARGET_${arch}) + add_compiler_rt_static_runtime(clang_rt.msan-${arch} ${arch} + SOURCES ${MSAN_RTL_SOURCES} + $<TARGET_OBJECTS:RTInterception.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + CFLAGS ${MSAN_RTL_CFLAGS} + SYMS msan.syms) + list(APPEND MSAN_RUNTIME_LIBRARIES clang_rt.msan-${arch}) +endif() -add_clang_compiler_rt_libraries(${MSAN_RUNTIME_LIBRARIES}) +add_compiler_rt_resource_file(msan_blacklist msan_blacklist.txt) if(LLVM_INCLUDE_TESTS) add_subdirectory(tests) endif() + +add_subdirectory(lit_tests) diff --git a/lib/asan/dynamic/Makefile.mk b/lib/msan/Makefile.mk index 897844e7eed4..99e3b036ea11 100644 --- a/lib/asan/dynamic/Makefile.mk +++ b/lib/msan/Makefile.mk @@ -1,4 +1,4 @@ -#===- lib/asan/dynamic/Makefile.mk -------------------------*- Makefile -*--===# +#===- lib/msan/Makefile.mk ---------------------------------*- Makefile -*--===# # # The LLVM Compiler Infrastructure # @@ -7,7 +7,7 @@ # #===------------------------------------------------------------------------===# -ModuleName := asan_dynamic +ModuleName := msan SubDirs := Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file))) @@ -17,9 +17,8 @@ Implementation := Generic # FIXME: use automatic dependencies? Dependencies := $(wildcard $(Dir)/*.h) -Dependencies += $(wildcard $(Dir)/../../interception/*.h) -Dependencies += $(wildcard $(Dir)/../../interception/mach_override/*.h) -Dependencies += $(wildcard $(Dir)/../../sanitizer_common/*.h) +Dependencies += $(wildcard $(Dir)/../interception/*.h) +Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h) -# Define a convenience variable for the asan dynamic functions. -AsanDynamicFunctions := $(Sources:%.cc=%) +# Define a convenience variable for all the msan functions. +MsanFunctions := $(Sources:%.cc=%) diff --git a/lib/msan/lit_tests/CMakeLists.txt b/lib/msan/lit_tests/CMakeLists.txt new file mode 100644 index 000000000000..ed2da6b839f5 --- /dev/null +++ b/lib/msan/lit_tests/CMakeLists.txt @@ -0,0 +1,32 @@ +set(MSAN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/..) +set(MSAN_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/..) + +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg + ) + +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg + ) + +if(COMPILER_RT_CAN_EXECUTE_TESTS) + # Run MSan tests only if we're sure we may produce working binaries. 
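+  # The dependency list pulls in the runtime libraries and the default
+  # blacklist so that check-msan rebuilds them before running lit.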
+ set(MSAN_TEST_DEPS + ${SANITIZER_COMMON_LIT_TEST_DEPS} + ${MSAN_RUNTIME_LIBRARIES} + msan_blacklist) + set(MSAN_TEST_PARAMS + msan_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg + ) + if(LLVM_INCLUDE_TESTS) + list(APPEND MSAN_TEST_DEPS MsanUnitTests) + endif() + add_lit_testsuite(check-msan "Running the MemorySanitizer tests" + ${CMAKE_CURRENT_BINARY_DIR} + PARAMS ${MSAN_TEST_PARAMS} + DEPENDS ${MSAN_TEST_DEPS} + ) + set_target_properties(check-msan PROPERTIES FOLDER "MSan tests") +endif() diff --git a/lib/msan/lit_tests/Linux/glob.cc b/lib/msan/lit_tests/Linux/glob.cc new file mode 100644 index 000000000000..513679c6d3d7 --- /dev/null +++ b/lib/msan/lit_tests/Linux/glob.cc @@ -0,0 +1,26 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && %t %p 2>&1 | FileCheck %s +// RUN: %clangxx_msan -m64 -O3 %s -o %t && %t %p 2>&1 | FileCheck %s + +#include <assert.h> +#include <glob.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> + +int main(int argc, char *argv[]) { + assert(argc == 2); + char buf[1024]; + snprintf(buf, sizeof(buf), "%s/%s", argv[1], "glob_test_root/*a"); + + glob_t globbuf; + int res = glob(buf, 0, 0, &globbuf); + + printf("%d %s\n", errno, strerror(errno)); + assert(res == 0); + assert(globbuf.gl_pathc == 2); + printf("%zu\n", strlen(globbuf.gl_pathv[0])); + printf("%zu\n", strlen(globbuf.gl_pathv[1])); + printf("PASS\n"); + // CHECK: PASS + return 0; +} diff --git a/lib/msan/lit_tests/Linux/glob_test_root/aa b/lib/msan/lit_tests/Linux/glob_test_root/aa new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/lib/msan/lit_tests/Linux/glob_test_root/aa diff --git a/lib/msan/lit_tests/Linux/glob_test_root/ab b/lib/msan/lit_tests/Linux/glob_test_root/ab new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/lib/msan/lit_tests/Linux/glob_test_root/ab diff --git a/lib/msan/lit_tests/Linux/glob_test_root/ba b/lib/msan/lit_tests/Linux/glob_test_root/ba new file mode 100644 index 000000000000..e69de29bb2d1 --- /dev/null +++ b/lib/msan/lit_tests/Linux/glob_test_root/ba diff --git a/lib/msan/lit_tests/Linux/lit.local.cfg b/lib/msan/lit_tests/Linux/lit.local.cfg new file mode 100644 index 000000000000..57271b8078a4 --- /dev/null +++ b/lib/msan/lit_tests/Linux/lit.local.cfg @@ -0,0 +1,9 @@ +def getRoot(config): + if not config.parent: + return config + return getRoot(config.parent) + +root = getRoot(config) + +if root.host_os not in ['Linux']: + config.unsupported = True diff --git a/lib/msan/lit_tests/Linux/syscalls.cc b/lib/msan/lit_tests/Linux/syscalls.cc new file mode 100644 index 000000000000..c12eda39189e --- /dev/null +++ b/lib/msan/lit_tests/Linux/syscalls.cc @@ -0,0 +1,50 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && %t 2>&1 +// RUN: %clangxx_msan -m64 -O3 %s -o %t && %t 2>&1 + +#include <assert.h> +#include <errno.h> +#include <glob.h> +#include <stdio.h> +#include <string.h> + +#include <sanitizer/linux_syscall_hooks.h> +#include <sanitizer/msan_interface.h> + +/* Test the presence of __sanitizer_syscall_ in the tool runtime, and general + sanity of their behaviour. 
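+   The pre_ hooks should accept fully initialized arguments; the post_ hooks
+   should unpoison kernel-written output buffers on success, which is what the
+   assertions below exercise.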
*/ + +int main(int argc, char *argv[]) { + char buf[1000]; + const int kTen = 10; + memset(buf, 0, sizeof(buf)); + __msan_unpoison(buf, sizeof(buf)); + __sanitizer_syscall_pre_recvmsg(0, buf, 0); + __sanitizer_syscall_pre_rt_sigpending(buf, kTen); + __sanitizer_syscall_pre_getdents(0, buf, kTen); + __sanitizer_syscall_pre_getdents64(0, buf, kTen); + + __msan_unpoison(buf, sizeof(buf)); + __sanitizer_syscall_post_recvmsg(0, 0, buf, 0); + __sanitizer_syscall_post_rt_sigpending(-1, buf, kTen); + __sanitizer_syscall_post_getdents(0, 0, buf, kTen); + __sanitizer_syscall_post_getdents64(0, 0, buf, kTen); + assert(__msan_test_shadow(buf, sizeof(buf)) == -1); + + __msan_unpoison(buf, sizeof(buf)); + __sanitizer_syscall_post_recvmsg(kTen, 0, buf, 0); + + // Tell the kernel that the output struct size is 10 bytes, verify that those + // bytes are unpoisoned, and the next byte is not. + __msan_poison(buf, kTen + 1); + __sanitizer_syscall_post_rt_sigpending(0, buf, kTen); + assert(__msan_test_shadow(buf, sizeof(buf)) == kTen); + + __msan_poison(buf, kTen + 1); + __sanitizer_syscall_post_getdents(kTen, 0, buf, kTen); + assert(__msan_test_shadow(buf, sizeof(buf)) == kTen); + + __msan_poison(buf, kTen + 1); + __sanitizer_syscall_post_getdents64(kTen, 0, buf, kTen); + assert(__msan_test_shadow(buf, sizeof(buf)) == kTen); + return 0; +} diff --git a/lib/msan/lit_tests/Unit/lit.cfg b/lib/msan/lit_tests/Unit/lit.cfg new file mode 100644 index 000000000000..ee379d0deaed --- /dev/null +++ b/lib/msan/lit_tests/Unit/lit.cfg @@ -0,0 +1,26 @@ +# -*- Python -*- + +import os + +def get_required_attr(config, attr_name): + attr_value = getattr(config, attr_name, None) + if not attr_value: + lit.fatal("No attribute %r in test configuration! You may need to run " + "tests from your build directory or add this attribute " + "to lit.site.cfg " % attr_name) + return attr_value + +# Setup attributes common for all compiler-rt projects. +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib", + "lit.common.unit.cfg") +lit.load_config(config, compiler_rt_lit_unit_cfg) + +# Setup config name. +config.name = 'MemorySanitizer-Unit' + +# Setup test source and exec root. For unit tests, we define +# it as build directory with MSan unit tests. +msan_binary_dir = get_required_attr(config, "msan_binary_dir") +config.test_exec_root = os.path.join(msan_binary_dir, "tests") +config.test_source_root = config.test_exec_root diff --git a/lib/msan/lit_tests/Unit/lit.site.cfg.in b/lib/msan/lit_tests/Unit/lit.site.cfg.in new file mode 100644 index 000000000000..a91f6713303a --- /dev/null +++ b/lib/msan/lit_tests/Unit/lit.site.cfg.in @@ -0,0 +1,17 @@ +## Autogenerated by LLVM/Clang configuration. +# Do not edit! + +config.target_triple = "@TARGET_TRIPLE@" +config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" +config.llvm_build_mode = "@LLVM_BUILD_MODE@" +config.msan_binary_dir = "@MSAN_BINARY_DIR@" + +try: + config.llvm_build_mode = config.llvm_build_mode % lit.params +except KeyError,e: + key, = e.args + lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) + +# Let the main config do the real work. 
+lit.load_config(config, "@MSAN_SOURCE_DIR@/lit_tests/Unit/lit.cfg") diff --git a/lib/msan/lit_tests/c-strdup.c b/lib/msan/lit_tests/c-strdup.c new file mode 100644 index 000000000000..7772f0f307b7 --- /dev/null +++ b/lib/msan/lit_tests/c-strdup.c @@ -0,0 +1,17 @@ +// RUN: %clang_msan -m64 -O0 %s -o %t && %t >%t.out 2>&1 +// RUN: %clang_msan -m64 -O1 %s -o %t && %t >%t.out 2>&1 +// RUN: %clang_msan -m64 -O2 %s -o %t && %t >%t.out 2>&1 +// RUN: %clang_msan -m64 -O3 %s -o %t && %t >%t.out 2>&1 + +// Test that strdup in C programs is intercepted. +// GLibC headers translate strdup to __strdup at -O1 and higher. + +#include <stdlib.h> +#include <string.h> +int main(int argc, char **argv) { + char buf[] = "abc"; + char *p = strdup(buf); + if (*p) + exit(0); + return 0; +} diff --git a/lib/msan/lit_tests/default_blacklist.cc b/lib/msan/lit_tests/default_blacklist.cc new file mode 100644 index 000000000000..32cc02257cb0 --- /dev/null +++ b/lib/msan/lit_tests/default_blacklist.cc @@ -0,0 +1,3 @@ +// Test that MSan uses the default blacklist from resource directory. +// RUN: %clangxx_msan -### %s 2>&1 | FileCheck %s +// CHECK: fsanitize-blacklist={{.*}}msan_blacklist.txt diff --git a/lib/msan/lit_tests/getaddrinfo-positive.cc b/lib/msan/lit_tests/getaddrinfo-positive.cc new file mode 100644 index 000000000000..f16679cc2aa2 --- /dev/null +++ b/lib/msan/lit_tests/getaddrinfo-positive.cc @@ -0,0 +1,19 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O3 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out + +#include <sys/types.h> +#include <sys/socket.h> +#include <netdb.h> +#include <stdlib.h> + +int main(void) { + struct addrinfo *ai; + struct addrinfo hint; + int res = getaddrinfo("localhost", NULL, &hint, &ai); + // CHECK: UMR in __interceptor_getaddrinfo at offset 0 inside + // CHECK: WARNING: Use of uninitialized value + // CHECK: #0 {{.*}} in main {{.*}}getaddrinfo-positive.cc:[[@LINE-3]] + return 0; +} diff --git a/lib/msan/lit_tests/getaddrinfo.cc b/lib/msan/lit_tests/getaddrinfo.cc new file mode 100644 index 000000000000..0518cf4733d0 --- /dev/null +++ b/lib/msan/lit_tests/getaddrinfo.cc @@ -0,0 +1,24 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && %t + +#include <sys/types.h> +#include <sys/socket.h> +#include <netdb.h> +#include <stdlib.h> + +void poison_stack_ahead() { + char buf[100000]; + // With -O0 this poisons a large chunk of stack. +} + +int main(void) { + poison_stack_ahead(); + + struct addrinfo *ai; + + // This should trigger loading of libnss_dns and friends. + // Those libraries are typically uninstrumented.They will call strlen() on a + // stack-allocated buffer, which is very likely to be poisoned. Test that we + // don't report this as an UMR. 
+ int res = getaddrinfo("not-in-etc-hosts", NULL, NULL, &ai); + return 0; +} diff --git a/lib/msan/lit_tests/heap-origin.cc b/lib/msan/lit_tests/heap-origin.cc new file mode 100644 index 000000000000..54e2c31438ff --- /dev/null +++ b/lib/msan/lit_tests/heap-origin.cc @@ -0,0 +1,33 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O1 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O2 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O3 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out + +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O0 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O1 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O2 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O3 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out + +#include <stdlib.h> +int main(int argc, char **argv) { + char *volatile x = (char*)malloc(5 * sizeof(char)); + if (*x) + exit(0); + // CHECK: WARNING: Use of uninitialized value + // CHECK: {{#0 0x.* in main .*heap-origin.cc:}}[[@LINE-3]] + + // CHECK-ORIGINS: Uninitialized value was created by a heap allocation + // CHECK-ORIGINS: {{#0 0x.* in .*malloc}} + // CHECK-ORIGINS: {{#1 0x.* in main .*heap-origin.cc:}}[[@LINE-8]] + + // CHECK: SUMMARY: MemorySanitizer: use-of-uninitialized-value {{.*heap-origin.cc:.* main}} + return 0; +} diff --git a/lib/msan/lit_tests/lit.cfg b/lib/msan/lit_tests/lit.cfg new file mode 100644 index 000000000000..42381885fe8e --- /dev/null +++ b/lib/msan/lit_tests/lit.cfg @@ -0,0 +1,85 @@ +# -*- Python -*- + +import os + +def get_required_attr(config, attr_name): + attr_value = getattr(config, attr_name, None) + if not attr_value: + lit.fatal("No attribute %r in test configuration! You may need to run " + "tests from your build directory or add this attribute " + "to lit.site.cfg " % attr_name) + return attr_value + +# Setup config name. +config.name = 'MemorySanitizer' + +# Setup source root. +config.test_source_root = os.path.dirname(__file__) + +def DisplayNoConfigMessage(): + lit.fatal("No site specific configuration available! " + + "Try running your test from the build tree or running " + + "make check-msan") + +# Figure out LLVM source root. +llvm_src_root = getattr(config, 'llvm_src_root', None) +if llvm_src_root is None: + # We probably haven't loaded the site-specific configuration: the user + # is likely trying to run a test file directly, and the site configuration + # wasn't created by the build system. + msan_site_cfg = lit.params.get('msan_site_config', None) + if (msan_site_cfg) and (os.path.exists(msan_site_cfg)): + lit.load_config(config, msan_site_cfg) + raise SystemExit + + # Try to guess the location of site-specific configuration using llvm-config + # util that can point where the build tree is. 
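+  # llvm-config --obj-root names the build tree; the generated site config is
+  # then expected under projects/compiler-rt/lib/msan/lit_tests inside it.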
+ llvm_config = lit.util.which("llvm-config", config.environment["PATH"]) + if not llvm_config: + DisplayNoConfigMessage() + + # Find out the presumed location of generated site config. + llvm_obj_root = lit.util.capture(["llvm-config", "--obj-root"]).strip() + msan_site_cfg = os.path.join(llvm_obj_root, "projects", "compiler-rt", + "lib", "msan", "lit_tests", "lit.site.cfg") + if (not msan_site_cfg) or (not os.path.exists(msan_site_cfg)): + DisplayNoConfigMessage() + + lit.load_config(config, msan_site_cfg) + raise SystemExit + +# Setup attributes common for all compiler-rt projects. +compiler_rt_src_root = get_required_attr(config, "compiler_rt_src_root") +compiler_rt_lit_cfg = os.path.join(compiler_rt_src_root, "lib", + "lit.common.cfg") +if (not compiler_rt_lit_cfg) or (not os.path.exists(compiler_rt_lit_cfg)): + lit.fatal("Can't find common compiler-rt lit config at: %r" + % compiler_rt_lit_cfg) +lit.load_config(config, compiler_rt_lit_cfg) + +# Setup default compiler flags used with -fsanitize=memory option. +clang_msan_cflags = ["-fsanitize=memory", + "-mno-omit-leaf-frame-pointer", + "-fno-omit-frame-pointer", + "-fno-optimize-sibling-calls", + "-g"] +clang_msan_cxxflags = ["-ccc-cxx "] + clang_msan_cflags +config.substitutions.append( ("%clang_msan ", + " ".join([config.clang] + clang_msan_cflags) + + " ") ) +config.substitutions.append( ("%clangxx_msan ", + " ".join([config.clang] + clang_msan_cxxflags) + + " ") ) + +# Setup path to external LLVM symbolizer to run MemorySanitizer output tests. +llvm_tools_dir = getattr(config, 'llvm_tools_dir', None) +if llvm_tools_dir: + llvm_symbolizer_path = os.path.join(llvm_tools_dir, "llvm-symbolizer") + config.environment['MSAN_SYMBOLIZER_PATH'] = llvm_symbolizer_path + +# Default test suffixes. +config.suffixes = ['.c', '.cc', '.cpp'] + +# MemorySanitizer tests are currently supported on Linux only. +if config.host_os not in ['Linux']: + config.unsupported = True diff --git a/lib/msan/lit_tests/lit.site.cfg.in b/lib/msan/lit_tests/lit.site.cfg.in new file mode 100644 index 000000000000..3b969e0b0614 --- /dev/null +++ b/lib/msan/lit_tests/lit.site.cfg.in @@ -0,0 +1,18 @@ +config.target_triple = "@TARGET_TRIPLE@" +config.host_os = "@HOST_OS@" +config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" +config.llvm_obj_root = "@LLVM_BINARY_DIR@" +config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" +config.clang = "@LLVM_BINARY_DIR@/bin/clang" + +# LLVM tools dir can be passed in lit parameters, so try to +# apply substitution. +try: + config.llvm_tools_dir = config.llvm_tools_dir % lit.params +except KeyError,e: + key, = e.args + lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) + +# Let the main config do the real work. 
+lit.load_config(config, "@MSAN_SOURCE_DIR@/lit_tests/lit.cfg") diff --git a/lib/msan/lit_tests/no_sanitize_memory.cc b/lib/msan/lit_tests/no_sanitize_memory.cc new file mode 100644 index 000000000000..48afc17e35e9 --- /dev/null +++ b/lib/msan/lit_tests/no_sanitize_memory.cc @@ -0,0 +1,34 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && %t >%t.out 2>&1 +// RUN: %clangxx_msan -m64 -O1 %s -o %t && %t >%t.out 2>&1 +// RUN: %clangxx_msan -m64 -O2 %s -o %t && %t >%t.out 2>&1 +// RUN: %clangxx_msan -m64 -O3 %s -o %t && %t >%t.out 2>&1 + +// RUN: %clangxx_msan -m64 -O0 %s -o %t -DCHECK_IN_F && %t >%t.out 2>&1 +// RUN: %clangxx_msan -m64 -O1 %s -o %t -DCHECK_IN_F && %t >%t.out 2>&1 +// RUN: %clangxx_msan -m64 -O2 %s -o %t -DCHECK_IN_F && %t >%t.out 2>&1 +// RUN: %clangxx_msan -m64 -O3 %s -o %t -DCHECK_IN_F && %t >%t.out 2>&1 + +// Test that (no_sanitize_memory) functions +// * don't check shadow values (-DCHECK_IN_F) +// * treat all values loaded from memory as fully initialized (-UCHECK_IN_F) + +#include <stdlib.h> +#include <stdio.h> + +__attribute__((noinline)) +__attribute__((no_sanitize_memory)) +int f(void) { + int x; + int * volatile p = &x; +#ifdef CHECK_IN_F + if (*p) + exit(0); +#endif + return *p; +} + +int main(void) { + if (f()) + exit(0); + return 0; +} diff --git a/lib/msan/lit_tests/no_sanitize_memory_prop.cc b/lib/msan/lit_tests/no_sanitize_memory_prop.cc new file mode 100644 index 000000000000..c74ca6b89db9 --- /dev/null +++ b/lib/msan/lit_tests/no_sanitize_memory_prop.cc @@ -0,0 +1,33 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && %t >%t.out 2>&1 +// RUN: %clangxx_msan -m64 -O1 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O2 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O3 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out + +// Test that (no_sanitize_memory) functions propagate shadow. + +// Note that at -O0 there is no report, because 'x' in 'f' is spilled to the +// stack, and then loaded back as a fully initialiazed value (due to +// no_sanitize_memory attribute). + +#include <stdlib.h> +#include <stdio.h> + +__attribute__((noinline)) +__attribute__((no_sanitize_memory)) +int f(int x) { + return x; +} + +int main(void) { + int x; + int * volatile p = &x; + int y = f(*p); + // CHECK: WARNING: Use of uninitialized value + // CHECK: {{#0 0x.* in main .*no_sanitize_memory_prop.cc:}}[[@LINE+1]] + if (y) + exit(0); + return 0; +} diff --git a/lib/msan/lit_tests/readdir64.cc b/lib/msan/lit_tests/readdir64.cc new file mode 100644 index 000000000000..0ec106c741f5 --- /dev/null +++ b/lib/msan/lit_tests/readdir64.cc @@ -0,0 +1,27 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && %t +// RUN: %clangxx_msan -m64 -O1 %s -o %t && %t +// RUN: %clangxx_msan -m64 -O2 %s -o %t && %t +// RUN: %clangxx_msan -m64 -O3 %s -o %t && %t + +// RUN: %clangxx_msan -m64 -O0 -D_FILE_OFFSET_BITS=64 %s -o %t && %t +// RUN: %clangxx_msan -m64 -O1 -D_FILE_OFFSET_BITS=64 %s -o %t && %t +// RUN: %clangxx_msan -m64 -O2 -D_FILE_OFFSET_BITS=64 %s -o %t && %t +// RUN: %clangxx_msan -m64 -O3 -D_FILE_OFFSET_BITS=64 %s -o %t && %t + +// Test that readdir64 is intercepted as well as readdir. 
+ +#include <sys/types.h> +#include <dirent.h> +#include <stdlib.h> + + +int main(void) { + DIR *dir = opendir("."); + struct dirent *d = readdir(dir); + if (d->d_name[0]) { + closedir(dir); + exit(0); + } + closedir(dir); + return 0; +} diff --git a/lib/msan/lit_tests/stack-origin.cc b/lib/msan/lit_tests/stack-origin.cc new file mode 100644 index 000000000000..90f527309224 --- /dev/null +++ b/lib/msan/lit_tests/stack-origin.cc @@ -0,0 +1,32 @@ +// RUN: %clangxx_msan -m64 -O0 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O1 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O2 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out +// RUN: %clangxx_msan -m64 -O3 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out + +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O0 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O1 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O2 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out +// RUN: %clangxx_msan -fsanitize-memory-track-origins -m64 -O3 %s -o %t && not %t >%t.out 2>&1 +// RUN: FileCheck %s < %t.out && FileCheck %s --check-prefix=CHECK-ORIGINS < %t.out + +#include <stdlib.h> +int main(int argc, char **argv) { + int x; + int *volatile p = &x; + if (*p) + exit(0); + // CHECK: WARNING: Use of uninitialized value + // CHECK: {{#0 0x.* in main .*stack-origin.cc:}}[[@LINE-3]] + + // CHECK-ORIGINS: Uninitialized value was created by an allocation of 'x' in the stack frame of function 'main' + + // CHECK: SUMMARY: MemorySanitizer: use-of-uninitialized-value {{.*stack-origin.cc:.* main}} + return 0; +} diff --git a/lib/msan/msan.cc b/lib/msan/msan.cc index 670213f011c7..aa79b31be2e0 100644 --- a/lib/msan/msan.cc +++ b/lib/msan/msan.cc @@ -58,7 +58,10 @@ static THREADLOCAL struct { uptr stack_top, stack_bottom; } __msan_stack_bounds; -extern const int __msan_track_origins; +static THREADLOCAL bool is_in_symbolizer; +static THREADLOCAL bool is_in_loader; + +extern "C" const int __msan_track_origins; int __msan_get_track_origins() { return __msan_track_origins; } @@ -67,11 +70,11 @@ namespace __msan { static bool IsRunningUnderDr() { bool result = false; - MemoryMappingLayout proc_maps; + MemoryMappingLayout proc_maps(/*cache_enabled*/true); const sptr kBufSize = 4095; char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__); while (proc_maps.Next(/* start */0, /* end */0, /* file_offset */0, - filename, kBufSize)) { + filename, kBufSize, /* protection */0)) { if (internal_strstr(filename, "libdynamorio") != 0) { result = true; break; @@ -81,6 +84,18 @@ static bool IsRunningUnderDr() { return result; } +void EnterSymbolizer() { is_in_symbolizer = true; } +void ExitSymbolizer() { is_in_symbolizer = false; } +bool IsInSymbolizer() { return is_in_symbolizer; } + +void EnterLoader() { is_in_loader = true; } +void ExitLoader() { is_in_loader = false; } + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +bool __msan_is_in_loader() { return is_in_loader; } +} + static Flags msan_flags; Flags *flags() { @@ -99,6 +114,7 @@ static const char *StackOriginDescr[kNumStackOriginDescrs]; static atomic_uint32_t NumStackOriginDescrs; static 
void ParseFlagsFromString(Flags *f, const char *str) { + ParseCommonFlagsFromString(str); ParseFlag(str, &f->poison_heap_with_zeroes, "poison_heap_with_zeroes"); ParseFlag(str, &f->poison_stack_with_zeroes, "poison_stack_with_zeroes"); ParseFlag(str, &f->poison_in_malloc, "poison_in_malloc"); @@ -108,22 +124,31 @@ static void ParseFlagsFromString(Flags *f, const char *str) { f->exit_code = 1; Die(); } - ParseFlag(str, &f->num_callers, "num_callers"); ParseFlag(str, &f->report_umrs, "report_umrs"); ParseFlag(str, &f->verbosity, "verbosity"); + ParseFlag(str, &f->wrap_signals, "wrap_signals"); } static void InitializeFlags(Flags *f, const char *options) { - internal_memset(f, 0, sizeof(*f)); + CommonFlags *cf = common_flags(); + cf->external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH"); + cf->strip_path_prefix = ""; + cf->fast_unwind_on_fatal = false; + cf->fast_unwind_on_malloc = true; + cf->malloc_context_size = 20; + internal_memset(f, 0, sizeof(*f)); f->poison_heap_with_zeroes = false; f->poison_stack_with_zeroes = false; f->poison_in_malloc = true; f->exit_code = 77; - f->num_callers = 20; f->report_umrs = true; f->verbosity = 0; + f->wrap_signals = true; + // Override from user-specified string. + if (__msan_default_options) + ParseFlagsFromString(f, __msan_default_options()); ParseFlagsFromString(f, options); } @@ -140,7 +165,14 @@ static void GetCurrentStackBounds(uptr *stack_top, uptr *stack_bottom) { *stack_bottom = __msan_stack_bounds.stack_bottom; } -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) { +void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, + bool fast) { + if (!fast) { + // Block reports from our interceptors during _Unwind_Backtrace. + SymbolizerScope sym_scope; + return stack->SlowUnwindStack(pc, max_s); + } + uptr stack_top, stack_bottom; GetCurrentStackBounds(&stack_top, &stack_bottom); stack->size = 0; @@ -168,7 +200,8 @@ void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin) { ++msan_report_count; StackTrace stack; - GetStackTrace(&stack, kStackTraceMax, pc, bp); + GetStackTrace(&stack, kStackTraceMax, pc, bp, + common_flags()->fast_unwind_on_fatal); u32 report_origin = (__msan_track_origins && OriginIsValid(origin)) ? origin : 0; @@ -204,12 +237,17 @@ void __msan_warning_noreturn() { void __msan_init() { if (msan_inited) return; msan_init_is_running = 1; + SanitizerToolName = "MemorySanitizer"; InstallAtExitHandler(); SetDieCallback(MsanDie); + InitTlsSize(); InitializeInterceptors(); - ReplaceOperatorsNewAndDelete(); + if (MSAN_REPLACE_OPERATORS_NEW_AND_DELETE) + ReplaceOperatorsNewAndDelete(); + const char *msan_options = GetEnv("MSAN_OPTIONS"); + InitializeFlags(&msan_flags, msan_options); if (StackSizeIsUnlimited()) { if (flags()->verbosity) Printf("Unlimited stack, doing reexec\n"); @@ -218,10 +256,10 @@ void __msan_init() { SetStackSizeLimitInBytes(32 * 1024 * 1024); ReExec(); } - const char *msan_options = GetEnv("MSAN_OPTIONS"); - InitializeFlags(&msan_flags, msan_options); + if (flags()->verbosity) Printf("MSAN_OPTIONS: %s\n", msan_options ? 
msan_options : "<empty>"); + msan_running_under_dr = IsRunningUnderDr(); __msan_clear_on_return(); if (__msan_track_origins && flags()->verbosity > 0) @@ -238,9 +276,7 @@ void __msan_init() { Die(); } - InstallTrapHandler(); - - const char *external_symbolizer = GetEnv("MSAN_SYMBOLIZER_PATH"); + const char *external_symbolizer = common_flags()->external_symbolizer_path; if (external_symbolizer && external_symbolizer[0]) { CHECK(InitializeExternalSymbolizer(external_symbolizer)); } @@ -265,7 +301,8 @@ void __msan_set_expect_umr(int expect_umr) { GET_CALLER_PC_BP_SP; (void)sp; StackTrace stack; - GetStackTrace(&stack, kStackTraceMax, pc, bp); + GetStackTrace(&stack, kStackTraceMax, pc, bp, + common_flags()->fast_unwind_on_fatal); ReportExpectedUMRNotFound(&stack); Die(); } @@ -294,6 +331,10 @@ void __msan_print_param_shadow() { Printf("\n"); } +void __msan_unpoison_param(uptr n) { + internal_memset(__msan_param_tls, 0, n * sizeof(*__msan_param_tls)); +} + sptr __msan_test_shadow(const void *x, uptr size) { unsigned char *s = (unsigned char*)MEM_TO_SHADOW((uptr)x); for (uptr i = 0; i < size; ++i) @@ -308,8 +349,6 @@ int __msan_set_poison_in_malloc(int do_poison) { return old; } -void __msan_break_optimization(void *x) { } - int __msan_has_dynamic_component() { return msan_running_under_dr; } @@ -344,7 +383,7 @@ int __msan_get_param_tls_offset() { return param_tls_p - tls_base_p; } -void __msan_partial_poison(void* data, void* shadow, uptr size) { +void __msan_partial_poison(const void* data, void* shadow, uptr size) { internal_memcpy((void*)MEM_TO_SHADOW((uptr)data), shadow, size); } @@ -353,7 +392,7 @@ void __msan_load_unpoisoned(void *src, uptr size, void *dst) { __msan_unpoison(dst, size); } -void __msan_set_origin(void *a, uptr size, u32 origin) { +void __msan_set_origin(const void *a, uptr size, u32 origin) { // Origin mapping is 4 bytes per 4 bytes of application memory. // Here we extend the range such that its left and right bounds are both // 4 byte aligned. @@ -407,7 +446,7 @@ const char *__msan_get_origin_descr_if_stack(u32 id) { } -u32 __msan_get_origin(void *a) { +u32 __msan_get_origin(const void *a) { if (!__msan_track_origins) return 0; uptr x = (uptr)a; uptr aligned = x & ~3ULL; @@ -415,6 +454,14 @@ u32 __msan_get_origin(void *a) { return *(u32*)origin_ptr; } -u32 __msan_get_origin_tls() { +u32 __msan_get_umr_origin() { return __msan_origin_tls; } + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +extern "C" { +SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +const char* __msan_default_options() { return ""; } +} // extern "C" +#endif + diff --git a/lib/msan/msan.h b/lib/msan/msan.h index 99d9a90d2dc7..baaba49f4187 100644 --- a/lib/msan/msan.h +++ b/lib/msan/msan.h @@ -15,17 +15,24 @@ #ifndef MSAN_H #define MSAN_H +#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_stacktrace.h" -#include "sanitizer/msan_interface.h" +#include "msan_interface_internal.h" #include "msan_flags.h" +#ifndef MSAN_REPLACE_OPERATORS_NEW_AND_DELETE +# define MSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1 +#endif + #define MEM_TO_SHADOW(mem) (((uptr)mem) & ~0x400000000000ULL) #define MEM_TO_ORIGIN(mem) (MEM_TO_SHADOW(mem) + 0x200000000000ULL) #define MEM_IS_APP(mem) ((uptr)mem >= 0x600000000000ULL) #define MEM_IS_SHADOW(mem) ((uptr)mem >= 0x200000000000ULL && \ (uptr)mem <= 0x400000000000ULL) +struct link_map; // Opaque type returned by dlopen(). 
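+// Forward declaration keeps <link.h> out of this header; the type is only
+// used as an opaque pointer by UnpoisonMappedDSO() declared below.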
+ const int kMsanParamTlsSizeInWords = 100; const int kMsanRetvalTlsSizeInWords = 100; @@ -46,22 +53,38 @@ void InstallTrapHandler(); void InstallAtExitHandler(); void ReplaceOperatorsNewAndDelete(); +void EnterSymbolizer(); +void ExitSymbolizer(); +bool IsInSymbolizer(); + +struct SymbolizerScope { + SymbolizerScope() { EnterSymbolizer(); } + ~SymbolizerScope() { ExitSymbolizer(); } +}; + +void EnterLoader(); +void ExitLoader(); + void MsanDie(); void PrintWarning(uptr pc, uptr bp); void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin); -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp); +void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, + bool fast); void ReportUMR(StackTrace *stack, u32 origin); void ReportExpectedUMRNotFound(StackTrace *stack); void ReportAtExitStatistics(); +void UnpoisonMappedDSO(struct link_map *map); + #define GET_MALLOC_STACK_TRACE \ StackTrace stack; \ stack.size = 0; \ if (__msan_get_track_origins() && msan_inited) \ - GetStackTrace(&stack, flags()->num_callers, \ - StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) + GetStackTrace(&stack, common_flags()->malloc_context_size, \ + StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ + common_flags()->fast_unwind_on_malloc) } // namespace __msan diff --git a/lib/msan/msan.syms b/lib/msan/msan.syms new file mode 100644 index 000000000000..24bbaba478b7 --- /dev/null +++ b/lib/msan/msan.syms @@ -0,0 +1,5 @@ +{ + __msan_*; + __sanitizer_syscall_pre_*; + __sanitizer_syscall_post_*; +}; diff --git a/lib/msan/msan_blacklist.txt b/lib/msan/msan_blacklist.txt new file mode 100644 index 000000000000..44a5680d4d06 --- /dev/null +++ b/lib/msan/msan_blacklist.txt @@ -0,0 +1,7 @@ +# Blacklist for MemorySanitizer. Turns off instrumentation of particular +# functions or sources. Use with care. You may set location of blacklist +# at compile-time using -fsanitize-blacklist=<path> flag. + +# Example usage: +# fun:*bad_function_name* +# src:file_with_tricky_code.cc diff --git a/lib/msan/msan_flags.h b/lib/msan/msan_flags.h index a85fc57253c9..64ef84509888 100644 --- a/lib/msan/msan_flags.h +++ b/lib/msan/msan_flags.h @@ -19,12 +19,12 @@ namespace __msan { // Flags. struct Flags { int exit_code; - int num_callers; int verbosity; bool poison_heap_with_zeroes; // default: false bool poison_stack_with_zeroes; // default: false bool poison_in_malloc; // default: true bool report_umrs; + bool wrap_signals; }; Flags *flags(); diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc index 462920413d4e..1bcf93db9440 100644 --- a/lib/msan/msan_interceptors.cc +++ b/lib/msan/msan_interceptors.cc @@ -17,16 +17,33 @@ #include "interception/interception.h" #include "msan.h" -#include "msan_platform_limits_posix.h" +#include "sanitizer_common/sanitizer_platform_limits_posix.h" +#include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_linux.h" #include <stdarg.h> // ACHTUNG! No other system header includes in this file. // Ideally, we should get rid of stdarg.h as well. +extern "C" const int __msan_keep_going; + using namespace __msan; +// True if this is a nested interceptor. 
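+// CHECK_UNPOISONED (defined below) skips UMR reports while a nested
+// interceptor is running, so calls made from inside another interceptor do not
+// produce spurious reports.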
+static THREADLOCAL int in_interceptor_scope; + +struct InterceptorScope { + InterceptorScope() { ++in_interceptor_scope; } + ~InterceptorScope() { --in_interceptor_scope; } +}; + +bool IsInInterceptorScope() { + return in_interceptor_scope; +} + #define ENSURE_MSAN_INITED() do { \ CHECK(!msan_init_is_running); \ if (!msan_inited) { \ @@ -34,19 +51,30 @@ using namespace __msan; } \ } while (0) -#define CHECK_UNPOISONED(x, n) \ - do { \ - sptr offset = __msan_test_shadow(x, n); \ - if (offset >= 0 && flags()->report_umrs) { \ - GET_CALLER_PC_BP_SP; \ - (void)sp; \ - Printf("UMR in %s at offset %d inside [%p, +%d) \n", \ - __FUNCTION__, offset, x, n); \ - __msan::PrintWarningWithOrigin( \ - pc, bp, __msan_get_origin((char*)x + offset)); \ - } \ +// Check that [x, x+n) range is unpoisoned. +#define CHECK_UNPOISONED_0(x, n) \ + do { \ + sptr offset = __msan_test_shadow(x, n); \ + if (__msan::IsInSymbolizer()) break; \ + if (offset >= 0 && __msan::flags()->report_umrs) { \ + GET_CALLER_PC_BP_SP; \ + (void) sp; \ + Printf("UMR in %s at offset %d inside [%p, +%d) \n", __FUNCTION__, \ + offset, x, n); \ + __msan::PrintWarningWithOrigin(pc, bp, \ + __msan_get_origin((char *) x + offset)); \ + if (!__msan_keep_going) { \ + Printf("Exiting\n"); \ + Die(); \ + } \ + } \ } while (0) +// Check that [x, x+n) range is unpoisoned unless we are in a nested +// interceptor. +#define CHECK_UNPOISONED(x, n) \ + if (!IsInInterceptorScope()) CHECK_UNPOISONED_0(x, n); + static void *fast_memset(void *ptr, int c, SIZE_T n); static void *fast_memcpy(void *dst, const void *src, SIZE_T n); @@ -78,7 +106,14 @@ INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) { INTERCEPTOR(void *, readdir, void *a) { ENSURE_MSAN_INITED(); void *res = REAL(readdir)(a); - __msan_unpoison(res, __msan::struct_dirent_sz); + __msan_unpoison(res, __sanitizer::struct_dirent_sz); + return res; +} + +INTERCEPTOR(void *, readdir64, void *a) { + ENSURE_MSAN_INITED(); + void *res = REAL(readdir)(a); + __msan_unpoison(res, __sanitizer::struct_dirent64_sz); return res; } @@ -97,8 +132,10 @@ INTERCEPTOR(void *, memset, void *s, int c, SIZE_T n) { INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) { GET_MALLOC_STACK_TRACE; CHECK_EQ(alignment & (alignment - 1), 0); - *memptr = MsanReallocate(&stack, 0, size, alignment, false); CHECK_NE(memptr, 0); + *memptr = MsanReallocate(&stack, 0, size, alignment, false); + CHECK_NE(*memptr, 0); + __msan_unpoison(memptr, sizeof(*memptr)); return 0; } @@ -152,6 +189,32 @@ INTERCEPTOR(char *, strdup, char *src) { return res; } +INTERCEPTOR(char *, __strdup, char *src) { + ENSURE_MSAN_INITED(); + SIZE_T n = REAL(strlen)(src); + char *res = REAL(__strdup)(src); + __msan_copy_poison(res, src, n + 1); + return res; +} + +INTERCEPTOR(char *, strndup, char *src, SIZE_T n) { + ENSURE_MSAN_INITED(); + SIZE_T copy_size = REAL(strnlen)(src, n); + char *res = REAL(strndup)(src, n); + __msan_copy_poison(res, src, copy_size); + __msan_unpoison(res + copy_size, 1); // \0 + return res; +} + +INTERCEPTOR(char *, __strndup, char *src, SIZE_T n) { + ENSURE_MSAN_INITED(); + SIZE_T copy_size = REAL(strnlen)(src, n); + char *res = REAL(__strndup)(src, n); + __msan_copy_poison(res, src, copy_size); + __msan_unpoison(res + copy_size, 1); // \0 + return res; +} + INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) { ENSURE_MSAN_INITED(); char *res = REAL(gcvt)(number, ndigit, buf); @@ -443,7 +506,7 @@ INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) { 
ENSURE_MSAN_INITED(); int res = REAL(__fxstat)(magic, fd, buf); if (!res) - __msan_unpoison(buf, __msan::struct_stat_sz); + __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } @@ -451,7 +514,7 @@ INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__fxstat64)(magic, fd, buf); if (!res) - __msan_unpoison(buf, __msan::struct_stat64_sz); + __msan_unpoison(buf, __sanitizer::struct_stat64_sz); return res; } @@ -459,7 +522,7 @@ INTERCEPTOR(int, __xstat, int magic, char *path, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__xstat)(magic, path, buf); if (!res) - __msan_unpoison(buf, __msan::struct_stat_sz); + __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } @@ -467,7 +530,7 @@ INTERCEPTOR(int, __xstat64, int magic, char *path, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__xstat64)(magic, path, buf); if (!res) - __msan_unpoison(buf, __msan::struct_stat64_sz); + __msan_unpoison(buf, __sanitizer::struct_stat64_sz); return res; } @@ -475,7 +538,7 @@ INTERCEPTOR(int, __lxstat, int magic, char *path, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__lxstat)(magic, path, buf); if (!res) - __msan_unpoison(buf, __msan::struct_stat_sz); + __msan_unpoison(buf, __sanitizer::struct_stat_sz); return res; } @@ -483,7 +546,7 @@ INTERCEPTOR(int, __lxstat64, int magic, char *path, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(__lxstat64)(magic, path, buf); if (!res) - __msan_unpoison(buf, __msan::struct_stat64_sz); + __msan_unpoison(buf, __sanitizer::struct_stat64_sz); return res; } @@ -497,19 +560,19 @@ INTERCEPTOR(int, pipe, int pipefd[2]) { return res; } -INTERCEPTOR(int, wait, int *status) { +INTERCEPTOR(int, pipe2, int pipefd[2], int flags) { ENSURE_MSAN_INITED(); - int res = REAL(wait)(status); - if (status) - __msan_unpoison(status, sizeof(*status)); + int res = REAL(pipe2)(pipefd, flags); + if (!res) + __msan_unpoison(pipefd, sizeof(int[2])); return res; } -INTERCEPTOR(int, waitpid, int pid, int *status, int options) { +INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int sv[2]) { ENSURE_MSAN_INITED(); - int res = REAL(waitpid)(pid, status, options); - if (status) - __msan_unpoison(status, sizeof(*status)); + int res = REAL(socketpair)(domain, type, protocol, sv); + if (!res) + __msan_unpoison(sv, sizeof(int[2])); return res; } @@ -533,7 +596,7 @@ INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) { ENSURE_MSAN_INITED(); char *res = REAL(getcwd)(buf, size); if (res) - __msan_unpoison(buf, REAL(strlen)(buf) + 1); + __msan_unpoison(res, REAL(strlen)(res) + 1); return res; } @@ -551,7 +614,7 @@ INTERCEPTOR(int, getrlimit, int resource, void *rlim) { ENSURE_MSAN_INITED(); int res = REAL(getrlimit)(resource, rlim); if (!res) - __msan_unpoison(rlim, __msan::struct_rlimit_sz); + __msan_unpoison(rlim, __sanitizer::struct_rlimit_sz); return res; } @@ -561,7 +624,7 @@ INTERCEPTOR(int, getrlimit64, int resource, void *rlim) { ENSURE_MSAN_INITED(); int res = REAL(getrlimit64)(resource, rlim); if (!res) - __msan_unpoison(rlim, __msan::struct_rlimit64_sz); + __msan_unpoison(rlim, __sanitizer::struct_rlimit64_sz); return res; } @@ -569,7 +632,7 @@ INTERCEPTOR(int, statfs, const char *s, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(statfs)(s, buf); if (!res) - __msan_unpoison(buf, __msan::struct_statfs_sz); + __msan_unpoison(buf, __sanitizer::struct_statfs_sz); return res; } @@ -577,7 +640,7 @@ INTERCEPTOR(int, fstatfs, int fd, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(fstatfs)(fd, buf); if (!res) - 
__msan_unpoison(buf, __msan::struct_statfs_sz); + __msan_unpoison(buf, __sanitizer::struct_statfs_sz); return res; } @@ -585,7 +648,7 @@ INTERCEPTOR(int, statfs64, const char *s, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(statfs64)(s, buf); if (!res) - __msan_unpoison(buf, __msan::struct_statfs64_sz); + __msan_unpoison(buf, __sanitizer::struct_statfs64_sz); return res; } @@ -593,7 +656,7 @@ INTERCEPTOR(int, fstatfs64, int fd, void *buf) { ENSURE_MSAN_INITED(); int res = REAL(fstatfs64)(fd, buf); if (!res) - __msan_unpoison(buf, __msan::struct_statfs64_sz); + __msan_unpoison(buf, __sanitizer::struct_statfs64_sz); return res; } @@ -601,7 +664,19 @@ INTERCEPTOR(int, uname, void *utsname) { ENSURE_MSAN_INITED(); int res = REAL(uname)(utsname); if (!res) { - __msan_unpoison(utsname, __msan::struct_utsname_sz); + __msan_unpoison(utsname, __sanitizer::struct_utsname_sz); + } + return res; +} + +INTERCEPTOR(int, gethostname, char *name, SIZE_T len) { + ENSURE_MSAN_INITED(); + int res = REAL(gethostname)(name, len); + if (!res) { + SIZE_T real_len = REAL(strnlen)(name, len); + if (real_len < len) + ++real_len; + __msan_unpoison(name, real_len); } return res; } @@ -611,7 +686,7 @@ INTERCEPTOR(int, epoll_wait, int epfd, void *events, int maxevents, ENSURE_MSAN_INITED(); int res = REAL(epoll_wait)(epfd, events, maxevents, timeout); if (res > 0) { - __msan_unpoison(events, __msan::struct_epoll_event_sz * res); + __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res); } return res; } @@ -621,7 +696,7 @@ INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents, ENSURE_MSAN_INITED(); int res = REAL(epoll_pwait)(epfd, events, maxevents, timeout, sigmask); if (res > 0) { - __msan_unpoison(events, __msan::struct_epoll_event_sz * res); + __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res); } return res; } @@ -639,12 +714,12 @@ INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags, ENSURE_MSAN_INITED(); SIZE_T srcaddr_sz; if (srcaddr) - srcaddr_sz = __msan_get_socklen_t(addrlen); + srcaddr_sz = __sanitizer_get_socklen_t(addrlen); SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen); if (res > 0) { __msan_unpoison(buf, res); if (srcaddr) { - SIZE_T sz = __msan_get_socklen_t(addrlen); + SIZE_T sz = __sanitizer_get_socklen_t(addrlen); __msan_unpoison(srcaddr, (sz < srcaddr_sz) ? sz : srcaddr_sz); } } @@ -655,14 +730,15 @@ INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct msghdr *msg, int flags) { ENSURE_MSAN_INITED(); SSIZE_T res = REAL(recvmsg)(fd, msg, flags); if (res > 0) { - for (SIZE_T i = 0; i < __msan_get_msghdr_iovlen(msg); ++i) - __msan_unpoison(__msan_get_msghdr_iov_iov_base(msg, i), - __msan_get_msghdr_iov_iov_len(msg, i)); + for (SIZE_T i = 0; i < __sanitizer_get_msghdr_iovlen(msg); ++i) + __msan_unpoison(__sanitizer_get_msghdr_iov_iov_base(msg, i), + __sanitizer_get_msghdr_iov_iov_len(msg, i)); } return res; } INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) { + if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0; GET_MALLOC_STACK_TRACE; if (!msan_inited) { // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
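The new first line of the calloc interceptor above delegates the nmemb*size overflow check to CallocShouldReturnNullDueToOverflow(), presumably the reason sanitizer_allocator.h is now included at the top of this file. The arithmetic it guards against can be sketched in isolation like this (illustrative only, not the sanitizer_common implementation):

#include <cstdint>
#include <cstdio>

// Returns true when nmemb * size would overflow size_t; a calloc-like allocator
// should then fail instead of allocating a too-short buffer.
static bool CallocOverflows(size_t nmemb, size_t size) {
  if (nmemb == 0 || size == 0) return false;
  return nmemb > SIZE_MAX / size;   // equivalent to nmemb * size > SIZE_MAX
}

int main() {
  std::printf("overflows: %d\n", CallocOverflows(SIZE_MAX / 2, 4));  // prints 1
}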
@@ -688,6 +764,18 @@ INTERCEPTOR(void *, malloc, SIZE_T size) { return MsanReallocate(&stack, 0, size, sizeof(u64), false); } +void __msan_allocated_memory(const void* data, uptr size) { + GET_MALLOC_STACK_TRACE; + if (flags()->poison_in_malloc) + __msan_poison(data, size); + if (__msan_get_track_origins()) { + u32 stack_id = StackDepotPut(stack.trace, stack.size); + CHECK(stack_id); + CHECK_EQ((stack_id >> 31), 0); // Higher bit is occupied by stack origins. + __msan_set_origin(data, size, stack_id); + } +} + INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags, int fd, OFF_T offset) { ENSURE_MSAN_INITED(); @@ -726,30 +814,197 @@ INTERCEPTOR(int, dladdr, void *addr, dlinfo *info) { return res; } +// dlopen() ultimately calls mmap() down inside the loader, which generally +// doesn't participate in dynamic symbol resolution. Therefore we won't +// intercept its calls to mmap, and we have to hook it here. The loader +// initializes the module before returning, so without the dynamic component, we +// won't be able to clear the shadow before the initializers. Fixing this would +// require putting our own initializer first to clear the shadow. +INTERCEPTOR(void *, dlopen, const char *filename, int flag) { + ENSURE_MSAN_INITED(); + EnterLoader(); + link_map *map = (link_map *)REAL(dlopen)(filename, flag); + ExitLoader(); + if (!__msan_has_dynamic_component() && map) { + // If msandr didn't clear the shadow before the initializers ran, we do it + // ourselves afterwards. + UnpoisonMappedDSO(map); + } + return (void *)map; +} + +typedef int (*dl_iterate_phdr_cb)(__sanitizer_dl_phdr_info *info, SIZE_T size, + void *data); +struct dl_iterate_phdr_data { + dl_iterate_phdr_cb callback; + void *data; +}; + +static int msan_dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, + void *data) { + if (info) { + __msan_unpoison(info, size); + if (info->dlpi_name) + __msan_unpoison(info->dlpi_name, REAL(strlen)(info->dlpi_name) + 1); + } + dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data; + __msan_unpoison_param(3); + return cbdata->callback(info, size, cbdata->data); +} + +INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb callback, void *data) { + ENSURE_MSAN_INITED(); + EnterLoader(); + dl_iterate_phdr_data cbdata; + cbdata.callback = callback; + cbdata.data = data; + int res = REAL(dl_iterate_phdr)(msan_dl_iterate_phdr_cb, (void *)&cbdata); + ExitLoader(); + return res; +} + INTERCEPTOR(int, getrusage, int who, void *usage) { ENSURE_MSAN_INITED(); int res = REAL(getrusage)(who, usage); if (res == 0) { - __msan_unpoison(usage, __msan::struct_rusage_sz); + __msan_unpoison(usage, __sanitizer::struct_rusage_sz); + } + return res; +} + +const int kMaxSignals = 1024; +static uptr sigactions[kMaxSignals]; +static StaticSpinMutex sigactions_mu; + +static void SignalHandler(int signo) { + typedef void (*signal_cb)(int x); + signal_cb cb = (signal_cb)sigactions[signo]; + cb(signo); +} + +static void SignalAction(int signo, void *si, void *uc) { + __msan_unpoison(si, __sanitizer::struct_sigaction_sz); + __msan_unpoison(uc, __sanitizer::ucontext_t_sz); + + typedef void (*sigaction_cb)(int, void *, void *); + sigaction_cb cb = (sigaction_cb)sigactions[signo]; + cb(signo, si, uc); +} + +INTERCEPTOR(int, sigaction, int signo, const __sanitizer_sigaction *act, + __sanitizer_sigaction *oldact) { + ENSURE_MSAN_INITED(); + // FIXME: check that *act is unpoisoned. + // That requires intercepting all of sigemptyset, sigfillset, etc. 
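  // The body below implements the wrap_signals scheme set up just above: when the
  // flag is on, the user's callback is stashed in the sigactions[] table (guarded
  // by sigactions_mu) and the kernel is handed one of the two trampolines instead
  // -- SignalHandler for plain handlers, SignalAction for SA_SIGINFO handlers --
  // so siginfo_t/ucontext_t can be unpoisoned before user code sees them.
  // SIG_IGN/SIG_DFL are passed through unwrapped, the previously stored callback
  // is written back into *oldact on the way out, and *oldact is unpoisoned in
  // either mode.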
+ int res; + if (flags()->wrap_signals) { + SpinMutexLock lock(&sigactions_mu); + CHECK_LT(signo, kMaxSignals); + uptr old_cb = sigactions[signo]; + __sanitizer_sigaction new_act; + __sanitizer_sigaction *pnew_act = act ? &new_act : 0; + if (act) { + internal_memcpy(pnew_act, act, __sanitizer::struct_sigaction_sz); + uptr cb = __sanitizer::__sanitizer_get_sigaction_sa_sigaction(pnew_act); + uptr new_cb = + __sanitizer::__sanitizer_get_sigaction_sa_siginfo(pnew_act) ? + (uptr)SignalAction : (uptr)SignalHandler; + if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { + sigactions[signo] = cb; + __sanitizer::__sanitizer_set_sigaction_sa_sigaction(pnew_act, new_cb); + } + } + res = REAL(sigaction)(signo, pnew_act, oldact); + if (res == 0 && oldact) { + uptr cb = __sanitizer::__sanitizer_get_sigaction_sa_sigaction(oldact); + if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { + __sanitizer::__sanitizer_set_sigaction_sa_sigaction(oldact, old_cb); + } + } + } else { + res = REAL(sigaction)(signo, act, oldact); + } + + if (res == 0 && oldact) { + __msan_unpoison(oldact, __sanitizer::struct_sigaction_sz); } return res; } +INTERCEPTOR(int, signal, int signo, uptr cb) { + ENSURE_MSAN_INITED(); + if (flags()->wrap_signals) { + CHECK_LT(signo, kMaxSignals); + SpinMutexLock lock(&sigactions_mu); + if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) { + sigactions[signo] = cb; + cb = (uptr) SignalHandler; + } + return REAL(signal)(signo, cb); + } else { + return REAL(signal)(signo, cb); + } +} + +extern "C" int pthread_attr_init(void *attr); +extern "C" int pthread_attr_destroy(void *attr); +extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize); +extern "C" int pthread_attr_getstack(void *attr, uptr *stack, uptr *stacksize); + +INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*), + void * param) { + ENSURE_MSAN_INITED(); // for GetTlsSize() + __sanitizer_pthread_attr_t myattr; + if (attr == 0) { + pthread_attr_init(&myattr); + attr = &myattr; + } + + AdjustStackSizeLinux(attr, flags()->verbosity); + + int res = REAL(pthread_create)(th, attr, callback, param); + if (attr == &myattr) + pthread_attr_destroy(&myattr); + if (!res) { + __msan_unpoison(th, __sanitizer::pthread_t_sz); + } + return res; +} + +struct MSanInterceptorContext { + bool in_interceptor_scope; +}; + +// A version of CHECK_UNPOISED using a saved scope value. Used in common +// interceptors. +#define CHECK_UNPOISONED_CTX(ctx, x, n) \ + if (!((MSanInterceptorContext *) ctx)->in_interceptor_scope) \ + CHECK_UNPOISONED_0(x, n); + #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ - __msan_unpoison(ptr, size) -#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) do { } while (false) -#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ - do { \ - ctx = 0; \ - (void)ctx; \ - ENSURE_MSAN_INITED(); \ + __msan_unpoison(ptr, size) +#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ + CHECK_UNPOISONED_CTX(ctx, ptr, size); +#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) 
\ + if (msan_init_is_running) return REAL(func)(__VA_ARGS__); \ + MSanInterceptorContext msan_ctx = { IsInInterceptorScope() }; \ + ctx = (void *)&msan_ctx; \ + InterceptorScope interceptor_scope; \ + ENSURE_MSAN_INITED(); +#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ + do { \ } while (false) -#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) do { } while (false) #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) do { } while (false) #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ do { } while (false) // FIXME #include "sanitizer_common/sanitizer_common_interceptors.inc" +#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s) +#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) +#define COMMON_SYSCALL_POST_READ_RANGE(p, s) +#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s) +#include "sanitizer_common/sanitizer_common_syscalls.inc" + // static void *fast_memset(void *ptr, int c, SIZE_T n) { // hack until we have a really fast internal_memset @@ -785,12 +1040,12 @@ void *fast_memcpy(void *dst, const void *src, SIZE_T n) { // These interface functions reside here so that they can use // fast_memset, etc. -void __msan_unpoison(void *a, uptr size) { +void __msan_unpoison(const void *a, uptr size) { if (!MEM_IS_APP(a)) return; fast_memset((void*)MEM_TO_SHADOW((uptr)a), 0, size); } -void __msan_poison(void *a, uptr size) { +void __msan_poison(const void *a, uptr size) { if (!MEM_IS_APP(a)) return; fast_memset((void*)MEM_TO_SHADOW((uptr)a), __msan::flags()->poison_heap_with_zeroes ? 0 : -1, size); @@ -872,6 +1127,7 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(fread_unlocked); INTERCEPT_FUNCTION(readlink); INTERCEPT_FUNCTION(readdir); + INTERCEPT_FUNCTION(readdir64); INTERCEPT_FUNCTION(memcpy); INTERCEPT_FUNCTION(memset); INTERCEPT_FUNCTION(memmove); @@ -880,6 +1136,9 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(wmemmove); INTERCEPT_FUNCTION(strcpy); // NOLINT INTERCEPT_FUNCTION(strdup); + INTERCEPT_FUNCTION(__strdup); + INTERCEPT_FUNCTION(strndup); + INTERCEPT_FUNCTION(__strndup); INTERCEPT_FUNCTION(strncpy); // NOLINT INTERCEPT_FUNCTION(strlen); INTERCEPT_FUNCTION(strnlen); @@ -917,8 +1176,8 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(__xstat64); INTERCEPT_FUNCTION(__lxstat64); INTERCEPT_FUNCTION(pipe); - INTERCEPT_FUNCTION(wait); - INTERCEPT_FUNCTION(waitpid); + INTERCEPT_FUNCTION(pipe2); + INTERCEPT_FUNCTION(socketpair); INTERCEPT_FUNCTION(fgets); INTERCEPT_FUNCTION(fgets_unlocked); INTERCEPT_FUNCTION(getcwd); @@ -930,13 +1189,19 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(statfs64); INTERCEPT_FUNCTION(fstatfs64); INTERCEPT_FUNCTION(uname); + INTERCEPT_FUNCTION(gethostname); INTERCEPT_FUNCTION(epoll_wait); INTERCEPT_FUNCTION(epoll_pwait); INTERCEPT_FUNCTION(recv); INTERCEPT_FUNCTION(recvfrom); INTERCEPT_FUNCTION(recvmsg); INTERCEPT_FUNCTION(dladdr); + INTERCEPT_FUNCTION(dlopen); + INTERCEPT_FUNCTION(dl_iterate_phdr); INTERCEPT_FUNCTION(getrusage); + INTERCEPT_FUNCTION(sigaction); + INTERCEPT_FUNCTION(signal); + INTERCEPT_FUNCTION(pthread_create); inited = 1; } } // namespace __msan diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h new file mode 100644 index 000000000000..fb57f67c42ed --- /dev/null +++ b/lib/msan/msan_interface_internal.h @@ -0,0 +1,128 @@ +//===-- msan_interface_internal.h -------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is a part of MemorySanitizer. +// +// Private MSan interface header. +//===----------------------------------------------------------------------===// + +#ifndef MSAN_INTERFACE_INTERNAL_H +#define MSAN_INTERFACE_INTERNAL_H + +#include "sanitizer_common/sanitizer_internal_defs.h" + +extern "C" { +// FIXME: document all interface functions. + +SANITIZER_INTERFACE_ATTRIBUTE +int __msan_get_track_origins(); + +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_init(); + +// Print a warning and maybe return. +// This function can die based on flags()->exit_code. +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_warning(); + +// Print a warning and die. +// Intrumentation inserts calls to this function when building in "fast" mode +// (i.e. -mllvm -msan-keep-going) +SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn)) +void __msan_warning_noreturn(); + +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_unpoison(const void *a, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_clear_and_unpoison(void *a, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void* __msan_memcpy(void *dst, const void *src, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void* __msan_memset(void *s, int c, uptr n); +SANITIZER_INTERFACE_ATTRIBUTE +void* __msan_memmove(void* dest, const void* src, uptr n); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_copy_poison(void *dst, const void *src, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_copy_origin(void *dst, const void *src, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_move_poison(void *dst, const void *src, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_poison(const void *a, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_poison_stack(void *a, uptr size); + +// Copy size bytes from src to dst and unpoison the result. +// Useful to implement unsafe loads. +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_load_unpoisoned(void *src, uptr size, void *dst); + +// Returns the offset of the first (at least partially) poisoned byte, +// or -1 if the whole range is good. +SANITIZER_INTERFACE_ATTRIBUTE +sptr __msan_test_shadow(const void *x, uptr size); + +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_set_origin(const void *a, uptr size, u32 origin); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_set_alloca_origin(void *a, uptr size, const char *descr); +SANITIZER_INTERFACE_ATTRIBUTE +u32 __msan_get_origin(const void *a); + +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_clear_on_return(); + +// Default: -1 (don't exit on error). +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_set_exit_code(int exit_code); + +SANITIZER_INTERFACE_ATTRIBUTE +int __msan_set_poison_in_malloc(int do_poison); + +SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +/* OPTIONAL */ const char* __msan_default_options(); + +// For testing. +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_set_expect_umr(int expect_umr); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_print_shadow(const void *x, uptr size); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_print_param_shadow(); +SANITIZER_INTERFACE_ATTRIBUTE +int __msan_has_dynamic_component(); + +// Returns x such that %fs:x is the first byte of __msan_retval_tls. +SANITIZER_INTERFACE_ATTRIBUTE +int __msan_get_retval_tls_offset(); +SANITIZER_INTERFACE_ATTRIBUTE +int __msan_get_param_tls_offset(); + +// For intercepting mmap from ld.so in msandr. +SANITIZER_INTERFACE_ATTRIBUTE +bool __msan_is_in_loader(); + +// For testing. 
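Among the declarations above, __msan_test_shadow() is the probe most of the rewritten unit tests later in this commit rely on: it returns the offset of the first (at least partially) poisoned byte in a range, or -1 when the range is fully initialized. A hedged usage sketch for a program built with -fsanitize=memory, assuming the public <sanitizer/msan_interface.h> header (the one msan_test.cc includes below) exposes the same entry points:

// clang++ -fsanitize=memory probe.cc && ./a.out
#include <sanitizer/msan_interface.h>
#include <cstdio>

int main() {
  char buf[8];
  __msan_unpoison(buf, sizeof(buf));   // whole buffer becomes defined
  __msan_poison(buf + 3, 2);           // bytes 3 and 4 become uninitialized again
  // Offset of the first poisoned byte, or -1 if the whole range is initialized.
  std::printf("first poisoned byte: %ld\n",
              (long)__msan_test_shadow(buf, sizeof(buf)));  // expected: 3
  return 0;
}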
+SANITIZER_INTERFACE_ATTRIBUTE +u32 __msan_get_umr_origin(); +SANITIZER_INTERFACE_ATTRIBUTE +const char *__msan_get_origin_descr_if_stack(u32 id); +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_partial_poison(const void* data, void* shadow, uptr size); + +// Tell MSan about newly allocated memory (ex.: custom allocator). +// Memory will be marked uninitialized, with origin at the call site. +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_allocated_memory(const void* data, uptr size); +} // extern "C" + +// Unpoison first n function arguments. +void __msan_unpoison_param(uptr n); + +#endif // MSAN_INTERFACE_INTERNAL_H diff --git a/lib/msan/msan_linux.cc b/lib/msan/msan_linux.cc index 2203980c638d..367dc904d05d 100644 --- a/lib/msan/msan_linux.cc +++ b/lib/msan/msan_linux.cc @@ -12,10 +12,14 @@ // Linux-specific code. //===----------------------------------------------------------------------===// -#ifdef __linux__ +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX #include "msan.h" +#include <algorithm> +#include <elf.h> +#include <link.h> #include <stdio.h> #include <stdlib.h> #include <signal.h> @@ -72,22 +76,6 @@ bool InitShadow(bool prot1, bool prot2, bool map_shadow, bool init_origins) { return true; } -static void MsanTrap(int, siginfo_t *siginfo, void *context) { - ucontext_t *ucontext = (ucontext_t*)context; - uptr pc = ucontext->uc_mcontext.gregs[REG_RIP]; - uptr bp = ucontext->uc_mcontext.gregs[REG_RBP]; - PrintWarning(pc + 1 /*1 will be subtracted in StackTrace::Print */, bp); - ucontext->uc_mcontext.gregs[REG_RIP] += 2; -} - -void InstallTrapHandler() { - struct sigaction sigact; - internal_memset(&sigact, 0, sizeof(sigact)); - sigact.sa_sigaction = MsanTrap; - sigact.sa_flags = SA_SIGINFO; - CHECK_EQ(0, sigaction(SIGILL, &sigact, 0)); -} - void MsanDie() { _exit(flags()->exit_code); } @@ -103,6 +91,42 @@ static void MsanAtExit(void) { void InstallAtExitHandler() { atexit(MsanAtExit); } + +void UnpoisonMappedDSO(link_map *map) { + typedef ElfW(Phdr) Elf_Phdr; + typedef ElfW(Ehdr) Elf_Ehdr; + char *base = (char *)map->l_addr; + Elf_Ehdr *ehdr = (Elf_Ehdr *)base; + char *phdrs = base + ehdr->e_phoff; + char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize; + + // Find the segment with the minimum base so we can "relocate" the p_vaddr + // fields. Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC + // objects have a non-zero base. + uptr preferred_base = ~0ULL; + for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) { + Elf_Phdr *phdr = (Elf_Phdr *)iter; + if (phdr->p_type == PT_LOAD) + preferred_base = std::min(preferred_base, (uptr)phdr->p_vaddr); + } + + // Compute the delta from the real base to get a relocation delta. + sptr delta = (uptr)base - preferred_base; + // Now we can figure out what the loader really mapped. + for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) { + Elf_Phdr *phdr = (Elf_Phdr *)iter; + if (phdr->p_type == PT_LOAD) { + uptr seg_start = phdr->p_vaddr + delta; + uptr seg_end = seg_start + phdr->p_memsz; + // None of these values are aligned. We consider the ragged edges of the + // load command as defined, since they are mapped from the file. 
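  // Worked example (hypothetical numbers): for a DSO whose lowest PT_LOAD has
  // p_vaddr 0 and which the loader placed at base 0x7f0000000000, delta is
  // 0x7f0000000000; a segment with p_vaddr 0x2000 and p_memsz 0x530 therefore
  // occupies [0x7f0000002000, 0x7f0000002530), and the rounding below widens
  // that span to whole pages before its shadow is cleared.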
+ seg_start = RoundDownTo(seg_start, GetPageSizeCached()); + seg_end = RoundUpTo(seg_end, GetPageSizeCached()); + __msan_unpoison((void *)seg_start, seg_end - seg_start); + } + } } +} // namespace __msan + #endif // __linux__ diff --git a/lib/msan/msan_new_delete.cc b/lib/msan/msan_new_delete.cc index c4efe2ef70ce..88d4364f6562 100644 --- a/lib/msan/msan_new_delete.cc +++ b/lib/msan/msan_new_delete.cc @@ -14,6 +14,8 @@ #include "msan.h" +#if MSAN_REPLACE_OPERATORS_NEW_AND_DELETE + #include <stddef.h> namespace __msan { @@ -49,3 +51,5 @@ void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } void operator delete[](void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } + +#endif // MSAN_REPLACE_OPERATORS_NEW_AND_DELETE diff --git a/lib/msan/msan_platform_limits_posix.cc b/lib/msan/msan_platform_limits_posix.cc deleted file mode 100644 index 19d6c5d0ab3f..000000000000 --- a/lib/msan/msan_platform_limits_posix.cc +++ /dev/null @@ -1,59 +0,0 @@ -//===-- msan_platform_limits.cc -------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of MemorySanitizer. -// -// Sizes and layouts of platform-specific POSIX data structures. -//===----------------------------------------------------------------------===// - -#ifdef __linux__ - -#include "msan.h" -#include "msan_platform_limits_posix.h" - -#include <sys/utsname.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <sys/time.h> -#include <sys/resource.h> -#include <sys/vfs.h> -#include <sys/epoll.h> -#include <sys/socket.h> -#include <dirent.h> - -namespace __msan { - unsigned struct_utsname_sz = sizeof(struct utsname); - unsigned struct_stat_sz = sizeof(struct stat); - unsigned struct_stat64_sz = sizeof(struct stat64); - unsigned struct_rlimit_sz = sizeof(struct rlimit); - unsigned struct_rlimit64_sz = sizeof(struct rlimit64); - unsigned struct_dirent_sz = sizeof(struct dirent); - unsigned struct_statfs_sz = sizeof(struct statfs); - unsigned struct_statfs64_sz = sizeof(struct statfs64); - unsigned struct_epoll_event_sz = sizeof(struct epoll_event); - unsigned struct_rusage_sz = sizeof(struct rusage); - - void* __msan_get_msghdr_iov_iov_base(void* msg, int idx) { - return ((struct msghdr *)msg)->msg_iov[idx].iov_base; - } - - uptr __msan_get_msghdr_iov_iov_len(void* msg, int idx) { - return ((struct msghdr *)msg)->msg_iov[idx].iov_len; - } - - uptr __msan_get_msghdr_iovlen(void* msg) { - return ((struct msghdr *)msg)->msg_iovlen; - } - - uptr __msan_get_socklen_t(void* socklen_ptr) { - return *(socklen_t*)socklen_ptr; - } -} - -#endif // __linux__ diff --git a/lib/msan/msan_platform_limits_posix.h b/lib/msan/msan_platform_limits_posix.h deleted file mode 100644 index 3cd90ce93f6c..000000000000 --- a/lib/msan/msan_platform_limits_posix.h +++ /dev/null @@ -1,36 +0,0 @@ -//===-- msan_platform_limits.h ----------------------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of MemorySanitizer. -// -// Sizes and layouts of platform-specific data structures. 
-//===----------------------------------------------------------------------===// - -#ifndef MSAN_PLATFORM_LIMITS_H -#define MSAN_PLATFORM_LIMITS_H - -namespace __msan { - extern unsigned struct_utsname_sz; - extern unsigned struct_stat_sz; - extern unsigned struct_stat64_sz; - extern unsigned struct_rlimit_sz; - extern unsigned struct_rlimit64_sz; - extern unsigned struct_dirent_sz; - extern unsigned struct_statfs_sz; - extern unsigned struct_statfs64_sz; - extern unsigned struct_epoll_event_sz; - extern unsigned struct_rusage_sz; - - void* __msan_get_msghdr_iov_iov_base(void* msg, int idx); - uptr __msan_get_msghdr_iov_iov_len(void* msg, int idx); - uptr __msan_get_msghdr_iovlen(void* msg); - uptr __msan_get_socklen_t(void* socklen_ptr); -} // namespace __msan - -#endif diff --git a/lib/msan/msan_report.cc b/lib/msan/msan_report.cc index 872108999733..734fc96fe69f 100644 --- a/lib/msan/msan_report.cc +++ b/lib/msan/msan_report.cc @@ -14,14 +14,14 @@ #include "msan.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_mutex.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_symbolizer.h" using namespace __sanitizer; -static StaticSpinMutex report_mu; - namespace __msan { static bool PrintsToTtyCached() { @@ -43,6 +43,12 @@ class Decorator: private __sanitizer::AnsiColorDecorator { const char *End() { return Default(); } }; +static void PrintStack(const uptr *trace, uptr size) { + SymbolizerScope sym_scope; + StackTrace::PrintStack(trace, size, true, + common_flags()->strip_path_prefix, 0); +} + static void DescribeOrigin(u32 origin) { Decorator d; if (flags()->verbosity) @@ -53,43 +59,60 @@ static void DescribeOrigin(u32 origin) { CHECK(sep); *sep = '\0'; Printf("%s", d.Origin()); - Printf(" %sUninitialised value was created by an allocation of '%s%s%s'" + Printf(" %sUninitialized value was created by an allocation of '%s%s%s'" " in the stack frame of function '%s%s%s'%s\n", - d.Origin(), d.Name(), s, d.Origin(), d.Name(), sep + 1, + d.Origin(), d.Name(), s, d.Origin(), d.Name(), Demangle(sep + 1), d.Origin(), d.End()); InternalFree(s); } else { uptr size = 0; const uptr *trace = StackDepotGet(origin, &size); - Printf(" %sUninitialised value was created by a heap allocation%s\n", + Printf(" %sUninitialized value was created by a heap allocation%s\n", d.Origin(), d.End()); - StackTrace::PrintStack(trace, size, true, "", 0); + PrintStack(trace, size); } } +static void ReportSummary(const char *error_type, StackTrace *stack) { + if (!stack->size || !IsSymbolizerAvailable()) return; + AddressInfo ai; + uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]); + { + SymbolizerScope sym_scope; + SymbolizeCode(pc, &ai, 1); + } + ReportErrorSummary(error_type, + StripPathPrefix(ai.file, + common_flags()->strip_path_prefix), + ai.line, ai.function); +} + void ReportUMR(StackTrace *stack, u32 origin) { if (!__msan::flags()->report_umrs) return; - GenericScopedLock<StaticSpinMutex> lock(&report_mu); + SpinMutexLock l(&CommonSanitizerReportMutex); Decorator d; Printf("%s", d.Warning()); Report(" WARNING: Use of uninitialized value\n"); Printf("%s", d.End()); - StackTrace::PrintStack(stack->trace, stack->size, true, "", 0); + PrintStack(stack->trace, stack->size); if (origin) { DescribeOrigin(origin); } + ReportSummary("use-of-uninitialized-value", stack); } void ReportExpectedUMRNotFound(StackTrace *stack) { - 
GenericScopedLock<StaticSpinMutex> lock(&report_mu); + SpinMutexLock l(&CommonSanitizerReportMutex); Printf(" WARNING: Expected use of uninitialized value not found\n"); - StackTrace::PrintStack(stack->trace, stack->size, true, "", 0); + PrintStack(stack->trace, stack->size); } void ReportAtExitStatistics() { + SpinMutexLock l(&CommonSanitizerReportMutex); + Decorator d; Printf("%s", d.Warning()); Printf("MemorySanitizer: %d warnings reported.\n", msan_report_count); @@ -97,4 +120,4 @@ void ReportAtExitStatistics() { } -} // namespace msan +} // namespace __msan diff --git a/lib/msan/tests/CMakeLists.txt b/lib/msan/tests/CMakeLists.txt index d2a28b2cba5c..7a784023d2db 100644 --- a/lib/msan/tests/CMakeLists.txt +++ b/lib/msan/tests/CMakeLists.txt @@ -31,8 +31,13 @@ set(MSAN_LIBCXX_LINK_FLAGS -fsanitize=memory) # Unittest sources and build flags. -set(MSAN_UNITTEST_SOURCE msan_test.cc) -set(MSAN_UNITTEST_HEADERS msandr_test_so.h) +set(MSAN_UNITTEST_SOURCES msan_test.cc msan_test_main.cc) +set(MSAN_LOADABLE_SOURCE msan_loadable.cc) +set(MSAN_UNITTEST_HEADERS + msan_test_config.h + msandr_test_so.h + ../../../include/sanitizer/msan_interface.h +) set(MSANDR_UNITTEST_SOURCE msandr_test_so.cc) set(MSAN_UNITTEST_COMMON_CFLAGS -I${MSAN_LIBCXX_PATH}/include @@ -42,7 +47,6 @@ set(MSAN_UNITTEST_COMMON_CFLAGS -I${COMPILER_RT_SOURCE_DIR}/lib/msan -std=c++0x -stdlib=libc++ - -fPIE -g -O2 -fno-exceptions @@ -57,11 +61,14 @@ set(MSAN_UNITTEST_INSTRUMENTED_CFLAGS ) set(MSAN_UNITTEST_LINK_FLAGS -fsanitize=memory - -pie -ldl # FIXME: we build libcxx without cxxabi and need libstdc++ to provide it. -lstdc++ ) +set(MSAN_LOADABLE_LINK_FLAGS + -fsanitize=memory + -shared +) # Compile source for the given architecture, using compiler # options in ${ARGN}, and add it to the object list. @@ -77,9 +84,7 @@ endmacro() macro(msan_link_shared so_list so_name arch) parse_arguments(SOURCE "OBJECTS;LINKFLAGS;DEPS" "" ${ARGN}) - get_unittest_directory(OUTPUT_DIR) - file(MAKE_DIRECTORY ${OUTPUT_DIR}) - set(output_so "${OUTPUT_DIR}/${so_name}.${arch}.so") + set(output_so "${CMAKE_CURRENT_BINARY_DIR}/${so_name}.${arch}.so") get_target_flags_for_arch(${arch} TARGET_LINKFLAGS) clang_link_shared(${output_so} OBJECTS ${SOURCE_OBJECTS} @@ -92,13 +97,13 @@ endmacro() # of objects in ${ARGN}. macro(add_msan_test test_suite test_name arch) get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS) - get_unittest_directory(OUTPUT_DIR) add_compiler_rt_test(${test_suite} ${test_name} OBJECTS ${ARGN} DEPS ${MSAN_RUNTIME_LIBRARIES} ${ARGN} + ${MSAN_LOADABLE_SO} LINK_FLAGS ${MSAN_UNITTEST_LINK_FLAGS} ${TARGET_LINK_FLAGS} - "-Wl,-rpath=${OUTPUT_DIR}") + "-Wl,-rpath=${CMAKE_CURRENT_BINARY_DIR}") endmacro() # Main MemorySanitizer unit tests. @@ -127,7 +132,14 @@ macro(add_msan_tests_for_arch arch) # Instrumented tests. set(MSAN_INST_TEST_OBJECTS) - msan_compile(MSAN_INST_TEST_OBJECTS ${MSAN_UNITTEST_SOURCE} ${arch} + foreach (SOURCE ${MSAN_UNITTEST_SOURCES}) + msan_compile(MSAN_INST_TEST_OBJECTS ${SOURCE} ${arch} + ${MSAN_UNITTEST_INSTRUMENTED_CFLAGS}) + endforeach(SOURCE) + + # Instrumented loadable module objects. + set(MSAN_INST_LOADABLE_OBJECTS) + msan_compile(MSAN_INST_LOADABLE_OBJECTS ${MSAN_LOADABLE_SOURCE} ${arch} ${MSAN_UNITTEST_INSTRUMENTED_CFLAGS}) # Uninstrumented shared object for MSanDR tests. @@ -135,6 +147,12 @@ macro(add_msan_tests_for_arch arch) msan_compile(MSANDR_TEST_OBJECTS ${MSANDR_UNITTEST_SOURCE} ${arch} ${MSAN_UNITTEST_COMMON_CFLAGS}) + # Instrumented loadable library tests. 
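The instrumented loadable module built by the rules above (libmsan_loadable.<arch>.so, compiled from msan_loadable.cc shown further down) is meant to be exercised at runtime through dlopen()/dlsym(). A hedged sketch of that usage; the library path, the x86_64 suffix, and the -ldl link step are assumptions, while get_dso_global() is the symbol msan_loadable.cc actually exports:

#include <dlfcn.h>
#include <cstdio>

int main() {
  // Name follows the msan_link_shared() rule above; adjust path/arch as needed.
  void *lib = dlopen("./libmsan_loadable.x86_64.so", RTLD_LAZY);
  if (!lib) { std::fprintf(stderr, "dlopen: %s\n", dlerror()); return 1; }
  typedef void **(*get_global_fn)();
  get_global_fn get_dso_global = (get_global_fn)dlsym(lib, "get_dso_global");
  if (get_dso_global)
    // Populated by loadable_module_init() in msan_loadable.cc when the dynamic
    // component is present; otherwise stays null.
    std::printf("dso_global = %p\n", *get_dso_global());
  dlclose(lib);
  return 0;
}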
+ set(MSAN_LOADABLE_SO) + msan_link_shared(MSAN_LOADABLE_SO "libmsan_loadable" ${arch} + OBJECTS ${MSAN_INST_LOADABLE_OBJECTS} + DEPS ${MSAN_INST_LOADABLE_OBJECTS} ${MSAN_RUNTIME_LIBRARIES}) + # Uninstrumented shared library tests. set(MSANDR_TEST_SO) msan_link_shared(MSANDR_TEST_SO "libmsandr_test" ${arch} @@ -151,16 +169,4 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS AND EXISTS ${MSAN_LIBCXX_PATH}/) if(CAN_TARGET_x86_64) add_msan_tests_for_arch(x86_64) endif() - - # Run unittests as a part of lit testsuite. - configure_lit_site_cfg( - ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in - ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg - ) - - add_lit_testsuite(check-msan "Running MemorySanitizer unittests" - ${CMAKE_CURRENT_BINARY_DIR} - DEPENDS MsanUnitTests - ) - set_target_properties(check-msan PROPERTIES FOLDER "MemorySanitizer unittests") endif() diff --git a/lib/msan/tests/lit.site.cfg.in b/lib/msan/tests/lit.site.cfg.in deleted file mode 100644 index bb9a28d6a6cb..000000000000 --- a/lib/msan/tests/lit.site.cfg.in +++ /dev/null @@ -1,9 +0,0 @@ -## Autogenerated by LLVM/Clang configuration. -# Do not edit! - -config.build_type = "@CMAKE_BUILD_TYPE@" -config.llvm_obj_root = "@LLVM_BINARY_DIR@" -config.llvm_src_root = "@LLVM_SOURCE_DIR@" - -# Let the main config do the real work. -lit.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg") diff --git a/lib/msan/tests/msan_loadable.cc b/lib/msan/tests/msan_loadable.cc new file mode 100644 index 000000000000..db3bf489853d --- /dev/null +++ b/lib/msan/tests/msan_loadable.cc @@ -0,0 +1,45 @@ +//===-- msan_loadable.cc --------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of MemorySanitizer. +// +// MemorySanitizer unit tests. +//===----------------------------------------------------------------------===// + +#include "msan/msan_interface_internal.h" +#include <stdlib.h> + +static void *dso_global; + +// No name mangling. +extern "C" { + +__attribute__((constructor)) +void loadable_module_init(void) { + if (!__msan_has_dynamic_component()) + return; + // The real test is that this compare should not make an uninit. + if (dso_global == NULL) + dso_global = malloc(4); +} + +__attribute__((destructor)) +void loadable_module_fini(void) { + if (!__msan_has_dynamic_component()) + return; + free(dso_global); + // *Don't* overwrite it with NULL! That would unpoison it, but our test + // relies on reloading at the same address and keeping the poison. +} + +void **get_dso_global() { + return &dso_global; +} + +} diff --git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc index 1ee7a27d56b1..1e05382e4b9f 100644 --- a/lib/msan/tests/msan_test.cc +++ b/lib/msan/tests/msan_test.cc @@ -12,18 +12,25 @@ // MemorySanitizer unit tests. 
//===----------------------------------------------------------------------===// +#ifndef MSAN_EXTERNAL_TEST_CONFIG +#include "msan_test_config.h" +#endif // MSAN_EXTERNAL_TEST_CONFIG + #include "sanitizer/msan_interface.h" #include "msandr_test_so.h" -#include "gtest/gtest.h" #include <stdlib.h> #include <stdarg.h> #include <stdio.h> #include <assert.h> #include <wchar.h> +#include <math.h> +#include <arpa/inet.h> #include <dlfcn.h> +#include <grp.h> #include <unistd.h> +#include <link.h> #include <limits.h> #include <sys/time.h> #include <sys/types.h> @@ -34,6 +41,10 @@ #include <sys/utsname.h> #include <sys/mman.h> #include <sys/vfs.h> +#include <dirent.h> +#include <pwd.h> +#include <sys/socket.h> +#include <netdb.h> #if defined(__i386__) || defined(__x86_64__) # include <emmintrin.h> @@ -53,29 +64,36 @@ typedef signed long long S8; // NOLINT #define NOINLINE __attribute__((noinline)) #define INLINE __attribute__((always_inline)) +static bool TrackingOrigins() { + S8 x; + __msan_set_origin(&x, sizeof(x), 0x1234); + U4 origin = __msan_get_origin(&x); + __msan_set_origin(&x, sizeof(x), 0); + return origin == 0x1234; +} -#define EXPECT_POISONED(action) \ +#define EXPECT_UMR(action) \ do { \ __msan_set_expect_umr(1); \ action; \ __msan_set_expect_umr(0); \ } while (0) -#define EXPECT_POISONED_O(action, origin) \ +#define EXPECT_UMR_O(action, origin) \ do { \ __msan_set_expect_umr(1); \ action; \ __msan_set_expect_umr(0); \ if (TrackingOrigins()) \ - EXPECT_EQ(origin, __msan_get_origin_tls()); \ + EXPECT_EQ(origin, __msan_get_umr_origin()); \ } while (0) -#define EXPECT_POISONED_S(action, stack_origin) \ +#define EXPECT_UMR_S(action, stack_origin) \ do { \ __msan_set_expect_umr(1); \ action; \ __msan_set_expect_umr(0); \ - u32 id = __msan_get_origin_tls(); \ + U4 id = __msan_get_umr_origin(); \ const char *str = __msan_get_origin_descr_if_stack(id); \ if (!str || strcmp(str, stack_origin)) { \ fprintf(stderr, "EXPECT_POISONED_S: id=%u %s, %s", \ @@ -84,6 +102,44 @@ typedef signed long long S8; // NOLINT } \ } while (0) +#define EXPECT_POISONED(x) ExpectPoisoned(x) + +template<typename T> +void ExpectPoisoned(const T& t) { + EXPECT_NE(-1, __msan_test_shadow((void*)&t, sizeof(t))); +} + +#define EXPECT_POISONED_O(x, origin) \ + ExpectPoisonedWithOrigin(x, origin) + +template<typename T> +void ExpectPoisonedWithOrigin(const T& t, unsigned origin) { + EXPECT_NE(-1, __msan_test_shadow((void*)&t, sizeof(t))); + if (TrackingOrigins()) + EXPECT_EQ(origin, __msan_get_origin((void*)&t)); +} + +#define EXPECT_POISONED_S(x, stack_origin) \ + ExpectPoisonedWithStackOrigin(x, stack_origin) + +template<typename T> +void ExpectPoisonedWithStackOrigin(const T& t, const char *stack_origin) { + EXPECT_NE(-1, __msan_test_shadow((void*)&t, sizeof(t))); + U4 id = __msan_get_origin((void*)&t); + const char *str = __msan_get_origin_descr_if_stack(id); + if (!str || strcmp(str, stack_origin)) { + fprintf(stderr, "EXPECT_POISONED_S: id=%u %s, %s", + id, stack_origin, str); + EXPECT_EQ(1, 0); + } +} + +#define EXPECT_NOT_POISONED(x) ExpectNotPoisoned(x) + +template<typename T> +void ExpectNotPoisoned(const T& t) { + EXPECT_EQ(-1, __msan_test_shadow((void*)&t, sizeof(t))); +} static U8 poisoned_array[100]; template<class T> @@ -95,7 +151,7 @@ T *GetPoisoned(int i = 0, T val = 0) { } template<class T> -T *GetPoisonedO(int i, u32 origin, T val = 0) { +T *GetPoisonedO(int i, U4 origin, T val = 0) { T *res = (T*)&poisoned_array[i]; *res = val; __msan_poison(&poisoned_array[i], sizeof(T)); @@ -112,81 +168,67 @@ static T 
Ident(T t) { return ret; } -static bool TrackingOrigins() { - S8 x; - __msan_set_origin(&x, sizeof(x), 0x1234); - u32 origin = __msan_get_origin(&x); - __msan_set_origin(&x, sizeof(x), 0); - return origin == 0x1234; -} - template<class T> NOINLINE T ReturnPoisoned() { return *GetPoisoned<T>(); } -static volatile S1 v_s1; -static volatile S2 v_s2; -static volatile S4 v_s4; -static volatile S8 v_s8; -static volatile U1 v_u1; -static volatile U2 v_u2; -static volatile U4 v_u4; -static volatile U8 v_u8; -static void* volatile v_p; -static volatile double v_d; static volatile int g_one = 1; static volatile int g_zero = 0; static volatile int g_0 = 0; static volatile int g_1 = 1; -#if MSAN_HAS_M128 -static volatile __m128i v_m128; -#endif - S4 a_s4[100]; S8 a_s8[100]; +// Check that malloc poisons memory. +// A lot of tests below depend on this. +TEST(MemorySanitizerSanity, PoisonInMalloc) { + int *x = (int*)malloc(sizeof(int)); + EXPECT_POISONED(*x); + free(x); +} + TEST(MemorySanitizer, NegativeTest1) { S4 *x = GetPoisoned<S4>(); if (g_one) *x = 0; - v_s4 = *x; + EXPECT_NOT_POISONED(*x); } TEST(MemorySanitizer, PositiveTest1) { // Load to store. - EXPECT_POISONED(v_s1 = *GetPoisoned<S1>()); - EXPECT_POISONED(v_s2 = *GetPoisoned<S2>()); - EXPECT_POISONED(v_s4 = *GetPoisoned<S4>()); - EXPECT_POISONED(v_s8 = *GetPoisoned<S8>()); + EXPECT_POISONED(*GetPoisoned<S1>()); + EXPECT_POISONED(*GetPoisoned<S2>()); + EXPECT_POISONED(*GetPoisoned<S4>()); + EXPECT_POISONED(*GetPoisoned<S8>()); // S->S conversions. - EXPECT_POISONED(v_s2 = *GetPoisoned<S1>()); - EXPECT_POISONED(v_s4 = *GetPoisoned<S1>()); - EXPECT_POISONED(v_s8 = *GetPoisoned<S1>()); + EXPECT_POISONED(*GetPoisoned<S1>()); + EXPECT_POISONED(*GetPoisoned<S1>()); + EXPECT_POISONED(*GetPoisoned<S1>()); - EXPECT_POISONED(v_s1 = *GetPoisoned<S2>()); - EXPECT_POISONED(v_s4 = *GetPoisoned<S2>()); - EXPECT_POISONED(v_s8 = *GetPoisoned<S2>()); + EXPECT_POISONED(*GetPoisoned<S2>()); + EXPECT_POISONED(*GetPoisoned<S2>()); + EXPECT_POISONED(*GetPoisoned<S2>()); - EXPECT_POISONED(v_s1 = *GetPoisoned<S4>()); - EXPECT_POISONED(v_s2 = *GetPoisoned<S4>()); - EXPECT_POISONED(v_s8 = *GetPoisoned<S4>()); + EXPECT_POISONED(*GetPoisoned<S4>()); + EXPECT_POISONED(*GetPoisoned<S4>()); + EXPECT_POISONED(*GetPoisoned<S4>()); - EXPECT_POISONED(v_s1 = *GetPoisoned<S8>()); - EXPECT_POISONED(v_s2 = *GetPoisoned<S8>()); - EXPECT_POISONED(v_s4 = *GetPoisoned<S8>()); + EXPECT_POISONED(*GetPoisoned<S8>()); + EXPECT_POISONED(*GetPoisoned<S8>()); + EXPECT_POISONED(*GetPoisoned<S8>()); // ZExt - EXPECT_POISONED(v_s2 = *GetPoisoned<U1>()); - EXPECT_POISONED(v_s4 = *GetPoisoned<U1>()); - EXPECT_POISONED(v_s8 = *GetPoisoned<U1>()); - EXPECT_POISONED(v_s4 = *GetPoisoned<U2>()); - EXPECT_POISONED(v_s8 = *GetPoisoned<U2>()); - EXPECT_POISONED(v_s8 = *GetPoisoned<U4>()); + EXPECT_POISONED(*GetPoisoned<U1>()); + EXPECT_POISONED(*GetPoisoned<U1>()); + EXPECT_POISONED(*GetPoisoned<U1>()); + EXPECT_POISONED(*GetPoisoned<U2>()); + EXPECT_POISONED(*GetPoisoned<U2>()); + EXPECT_POISONED(*GetPoisoned<U4>()); // Unary ops. 
- EXPECT_POISONED(v_s4 = - *GetPoisoned<S4>()); + EXPECT_POISONED(- *GetPoisoned<S4>()); - EXPECT_POISONED(a_s4[g_zero] = 100 / *GetPoisoned<S4>(0, 1)); + EXPECT_UMR(a_s4[g_zero] = 100 / *GetPoisoned<S4>(0, 1)); a_s4[g_zero] = 1 - *GetPoisoned<S4>(); @@ -198,22 +240,22 @@ TEST(MemorySanitizer, Phi1) { if (g_one) { c = *GetPoisoned<S4>(); } else { - __msan_break_optimization(0); + break_optimization(0); c = 0; } - EXPECT_POISONED(v_s4 = c); + EXPECT_POISONED(c); } TEST(MemorySanitizer, Phi2) { S4 i = *GetPoisoned<S4>(); S4 n = g_one; - EXPECT_POISONED(for (; i < g_one; i++);); - EXPECT_POISONED(v_s4 = i); + EXPECT_UMR(for (; i < g_one; i++);); + EXPECT_POISONED(i); } -NOINLINE void Arg1ExpectUMR(S4 a1) { EXPECT_POISONED(v_s4 = a1); } -NOINLINE void Arg2ExpectUMR(S4 a1, S4 a2) { EXPECT_POISONED(v_s4 = a2); } -NOINLINE void Arg3ExpectUMR(S1 a1, S4 a2, S8 a3) { EXPECT_POISONED(v_s8 = a3); } +NOINLINE void Arg1ExpectUMR(S4 a1) { EXPECT_POISONED(a1); } +NOINLINE void Arg2ExpectUMR(S4 a1, S4 a2) { EXPECT_POISONED(a2); } +NOINLINE void Arg3ExpectUMR(S1 a1, S4 a2, S8 a3) { EXPECT_POISONED(a3); } TEST(MemorySanitizer, ArgTest) { Arg1ExpectUMR(*GetPoisoned<S4>()); @@ -229,50 +271,50 @@ TEST(MemorySanitizer, CallAndRet) { ReturnPoisoned<S4>(); ReturnPoisoned<S8>(); - EXPECT_POISONED(v_s1 = ReturnPoisoned<S1>()); - EXPECT_POISONED(v_s2 = ReturnPoisoned<S2>()); - EXPECT_POISONED(v_s4 = ReturnPoisoned<S4>()); - EXPECT_POISONED(v_s8 = ReturnPoisoned<S8>()); + EXPECT_POISONED(ReturnPoisoned<S1>()); + EXPECT_POISONED(ReturnPoisoned<S2>()); + EXPECT_POISONED(ReturnPoisoned<S4>()); + EXPECT_POISONED(ReturnPoisoned<S8>()); } // malloc() in the following test may be optimized to produce a compile-time // undef value. Check that we trap on the volatile assignment anyway. TEST(MemorySanitizer, DISABLED_MallocNoIdent) { S4 *x = (int*)malloc(sizeof(S4)); - EXPECT_POISONED(v_s4 = *x); + EXPECT_POISONED(*x); free(x); } TEST(MemorySanitizer, Malloc) { S4 *x = (int*)Ident(malloc(sizeof(S4))); - EXPECT_POISONED(v_s4 = *x); + EXPECT_POISONED(*x); free(x); } TEST(MemorySanitizer, Realloc) { S4 *x = (int*)Ident(realloc(0, sizeof(S4))); - EXPECT_POISONED(v_s4 = x[0]); + EXPECT_POISONED(x[0]); x[0] = 1; x = (int*)Ident(realloc(x, 2 * sizeof(S4))); - v_s4 = x[0]; // Ok, was inited before. - EXPECT_POISONED(v_s4 = x[1]); + EXPECT_NOT_POISONED(x[0]); // Ok, was inited before. + EXPECT_POISONED(x[1]); x = (int*)Ident(realloc(x, 3 * sizeof(S4))); - v_s4 = x[0]; // Ok, was inited before. - EXPECT_POISONED(v_s4 = x[2]); - EXPECT_POISONED(v_s4 = x[1]); + EXPECT_NOT_POISONED(x[0]); // Ok, was inited before. + EXPECT_POISONED(x[2]); + EXPECT_POISONED(x[1]); x[2] = 1; // Init this here. Check that after realloc it is poisoned again. x = (int*)Ident(realloc(x, 2 * sizeof(S4))); - v_s4 = x[0]; // Ok, was inited before. - EXPECT_POISONED(v_s4 = x[1]); + EXPECT_NOT_POISONED(x[0]); // Ok, was inited before. + EXPECT_POISONED(x[1]); x = (int*)Ident(realloc(x, 3 * sizeof(S4))); - EXPECT_POISONED(v_s4 = x[1]); - EXPECT_POISONED(v_s4 = x[2]); + EXPECT_POISONED(x[1]); + EXPECT_POISONED(x[2]); free(x); } TEST(MemorySanitizer, Calloc) { S4 *x = (int*)Ident(calloc(1, sizeof(S4))); - v_s4 = *x; // Should not be poisoned. + EXPECT_NOT_POISONED(*x); // Should not be poisoned. // EXPECT_EQ(0, *x); free(x); } @@ -283,83 +325,79 @@ TEST(MemorySanitizer, AndOr) { // correct regardless of endianness. 
((U1*)p)[1] = 0; ((U1*)p)[2] = 0xff; - v_u4 = *p & 0x00ffff00; - v_u4 = *p & 0x00ff0000; - v_u4 = *p & 0x0000ff00; - EXPECT_POISONED(v_u4 = *p & 0xff000000); - EXPECT_POISONED(v_u4 = *p & 0x000000ff); - EXPECT_POISONED(v_u4 = *p & 0x0000ffff); - EXPECT_POISONED(v_u4 = *p & 0xffff0000); + EXPECT_NOT_POISONED(*p & 0x00ffff00); + EXPECT_NOT_POISONED(*p & 0x00ff0000); + EXPECT_NOT_POISONED(*p & 0x0000ff00); + EXPECT_POISONED(*p & 0xff000000); + EXPECT_POISONED(*p & 0x000000ff); + EXPECT_POISONED(*p & 0x0000ffff); + EXPECT_POISONED(*p & 0xffff0000); - v_u4 = *p | 0xff0000ff; - v_u4 = *p | 0xff00ffff; - v_u4 = *p | 0xffff00ff; - EXPECT_POISONED(v_u4 = *p | 0xff000000); - EXPECT_POISONED(v_u4 = *p | 0x000000ff); - EXPECT_POISONED(v_u4 = *p | 0x0000ffff); - EXPECT_POISONED(v_u4 = *p | 0xffff0000); + EXPECT_NOT_POISONED(*p | 0xff0000ff); + EXPECT_NOT_POISONED(*p | 0xff00ffff); + EXPECT_NOT_POISONED(*p | 0xffff00ff); + EXPECT_POISONED(*p | 0xff000000); + EXPECT_POISONED(*p | 0x000000ff); + EXPECT_POISONED(*p | 0x0000ffff); + EXPECT_POISONED(*p | 0xffff0000); - EXPECT_POISONED(v_u4 = *GetPoisoned<bool>() & *GetPoisoned<bool>()); + EXPECT_POISONED(*GetPoisoned<bool>() & *GetPoisoned<bool>()); } template<class T> -static void testNot(T value, T shadow) { +static bool applyNot(T value, T shadow) { __msan_partial_poison(&value, &shadow, sizeof(T)); - volatile bool v_T = !value; + return !value; } TEST(MemorySanitizer, Not) { - testNot<U4>(0x0, 0x0); - testNot<U4>(0xFFFFFFFF, 0x0); - EXPECT_POISONED(testNot<U4>(0xFFFFFFFF, 0xFFFFFFFF)); - testNot<U4>(0xFF000000, 0x0FFFFFFF); - testNot<U4>(0xFF000000, 0x00FFFFFF); - testNot<U4>(0xFF000000, 0x0000FFFF); - testNot<U4>(0xFF000000, 0x00000000); - EXPECT_POISONED(testNot<U4>(0xFF000000, 0xFF000000)); - testNot<U4>(0xFF800000, 0xFF000000); - EXPECT_POISONED(testNot<U4>(0x00008000, 0x00008000)); - - testNot<U1>(0x0, 0x0); - testNot<U1>(0xFF, 0xFE); - testNot<U1>(0xFF, 0x0); - EXPECT_POISONED(testNot<U1>(0xFF, 0xFF)); - - EXPECT_POISONED(testNot<void*>((void*)0xFFFFFF, (void*)(-1))); - testNot<void*>((void*)0xFFFFFF, (void*)(-2)); + EXPECT_NOT_POISONED(applyNot<U4>(0x0, 0x0)); + EXPECT_NOT_POISONED(applyNot<U4>(0xFFFFFFFF, 0x0)); + EXPECT_POISONED(applyNot<U4>(0xFFFFFFFF, 0xFFFFFFFF)); + EXPECT_NOT_POISONED(applyNot<U4>(0xFF000000, 0x0FFFFFFF)); + EXPECT_NOT_POISONED(applyNot<U4>(0xFF000000, 0x00FFFFFF)); + EXPECT_NOT_POISONED(applyNot<U4>(0xFF000000, 0x0000FFFF)); + EXPECT_NOT_POISONED(applyNot<U4>(0xFF000000, 0x00000000)); + EXPECT_POISONED(applyNot<U4>(0xFF000000, 0xFF000000)); + EXPECT_NOT_POISONED(applyNot<U4>(0xFF800000, 0xFF000000)); + EXPECT_POISONED(applyNot<U4>(0x00008000, 0x00008000)); + + EXPECT_NOT_POISONED(applyNot<U1>(0x0, 0x0)); + EXPECT_NOT_POISONED(applyNot<U1>(0xFF, 0xFE)); + EXPECT_NOT_POISONED(applyNot<U1>(0xFF, 0x0)); + EXPECT_POISONED(applyNot<U1>(0xFF, 0xFF)); + + EXPECT_POISONED(applyNot<void*>((void*)0xFFFFFF, (void*)(-1))); + EXPECT_NOT_POISONED(applyNot<void*>((void*)0xFFFFFF, (void*)(-2))); } TEST(MemorySanitizer, Shift) { U4 *up = GetPoisoned<U4>(); ((U1*)up)[0] = 0; ((U1*)up)[3] = 0xff; - v_u4 = *up >> 30; - v_u4 = *up >> 24; - EXPECT_POISONED(v_u4 = *up >> 23); - EXPECT_POISONED(v_u4 = *up >> 10); + EXPECT_NOT_POISONED(*up >> 30); + EXPECT_NOT_POISONED(*up >> 24); + EXPECT_POISONED(*up >> 23); + EXPECT_POISONED(*up >> 10); - v_u4 = *up << 30; - v_u4 = *up << 24; - EXPECT_POISONED(v_u4 = *up << 23); - EXPECT_POISONED(v_u4 = *up << 10); + EXPECT_NOT_POISONED(*up << 30); + EXPECT_NOT_POISONED(*up << 24); + EXPECT_POISONED(*up << 23); + 
EXPECT_POISONED(*up << 10); S4 *sp = (S4*)up; - v_s4 = *sp >> 30; - v_s4 = *sp >> 24; - EXPECT_POISONED(v_s4 = *sp >> 23); - EXPECT_POISONED(v_s4 = *sp >> 10); + EXPECT_NOT_POISONED(*sp >> 30); + EXPECT_NOT_POISONED(*sp >> 24); + EXPECT_POISONED(*sp >> 23); + EXPECT_POISONED(*sp >> 10); sp = GetPoisoned<S4>(); ((S1*)sp)[1] = 0; ((S1*)sp)[2] = 0; - EXPECT_POISONED(v_s4 = *sp >> 31); + EXPECT_POISONED(*sp >> 31); - v_s4 = 100; - EXPECT_POISONED(v_s4 = v_s4 >> *GetPoisoned<S4>()); - v_u4 = 100; - EXPECT_POISONED(v_u4 = v_u4 >> *GetPoisoned<S4>()); - v_u4 = 100; - EXPECT_POISONED(v_u4 = v_u4 << *GetPoisoned<S4>()); + EXPECT_POISONED(100 >> *GetPoisoned<S4>()); + EXPECT_POISONED(100U >> *GetPoisoned<S4>()); } NOINLINE static int GetPoisonedZero() { @@ -374,14 +412,14 @@ NOINLINE static int GetPoisonedZero() { TEST(MemorySanitizer, LoadFromDirtyAddress) { int *a = new int; *a = 0; - EXPECT_POISONED(__msan_break_optimization((void*)(U8)a[GetPoisonedZero()])); + EXPECT_UMR(break_optimization((void*)(U8)a[GetPoisonedZero()])); delete a; } TEST(MemorySanitizer, StoreToDirtyAddress) { int *a = new int; - EXPECT_POISONED(a[GetPoisonedZero()] = 0); - __msan_break_optimization(a); + EXPECT_UMR(a[GetPoisonedZero()] = 0); + break_optimization(a); delete a; } @@ -393,19 +431,19 @@ NOINLINE void StackTestFunc() { S2 ok2 = 1; S1 p1; S1 ok1 = 1; - __msan_break_optimization(&p4); - __msan_break_optimization(&ok4); - __msan_break_optimization(&p2); - __msan_break_optimization(&ok2); - __msan_break_optimization(&p1); - __msan_break_optimization(&ok1); + break_optimization(&p4); + break_optimization(&ok4); + break_optimization(&p2); + break_optimization(&ok2); + break_optimization(&p1); + break_optimization(&ok1); - EXPECT_POISONED(v_s4 = p4); - EXPECT_POISONED(v_s2 = p2); - EXPECT_POISONED(v_s1 = p1); - v_s1 = ok1; - v_s2 = ok2; - v_s4 = ok4; + EXPECT_POISONED(p4); + EXPECT_POISONED(p2); + EXPECT_POISONED(p1); + EXPECT_NOT_POISONED(ok1); + EXPECT_NOT_POISONED(ok2); + EXPECT_NOT_POISONED(ok4); } TEST(MemorySanitizer, StackTest) { @@ -414,7 +452,7 @@ TEST(MemorySanitizer, StackTest) { NOINLINE void StackStressFunc() { int foo[10000]; - __msan_break_optimization(foo); + break_optimization(foo); } TEST(MemorySanitizer, DISABLED_StackStressTest) { @@ -426,12 +464,12 @@ template<class T> void TestFloatingPoint() { static volatile T v; static T g[100]; - __msan_break_optimization(&g); + break_optimization(&g); T *x = GetPoisoned<T>(); T *y = GetPoisoned<T>(1); - EXPECT_POISONED(v = *x); - EXPECT_POISONED(v_s8 = *x); - EXPECT_POISONED(v_s4 = *x); + EXPECT_POISONED(*x); + EXPECT_POISONED((long long)*x); + EXPECT_POISONED((int)*x); g[0] = *x; g[1] = *x + *y; g[2] = *x - *y; @@ -447,7 +485,7 @@ TEST(MemorySanitizer, DynMem) { S4 x = 0; S4 *y = GetPoisoned<S4>(); memcpy(y, &x, g_one * sizeof(S4)); - v_s4 = *y; + EXPECT_NOT_POISONED(*y); } static char *DynRetTestStr; @@ -455,7 +493,7 @@ static char *DynRetTestStr; TEST(MemorySanitizer, DynRet) { if (!__msan_has_dynamic_component()) return; ReturnPoisoned<S8>(); - v_s4 = clearenv(); + EXPECT_NOT_POISONED(clearenv()); } @@ -486,8 +524,8 @@ LargeStruct LargeRetTest() { TEST(MemorySanitizer, LargeRet) { LargeStruct a = LargeRetTest(); - EXPECT_POISONED(v_s4 = a.x[0]); - EXPECT_POISONED(v_s4 = a.x[9]); + EXPECT_POISONED(a.x[0]); + EXPECT_POISONED(a.x[9]); } TEST(MemorySanitizer, fread) { @@ -495,9 +533,9 @@ TEST(MemorySanitizer, fread) { FILE *f = fopen("/proc/self/stat", "r"); assert(f); fread(x, 1, 32, f); - v_s1 = x[0]; - v_s1 = x[16]; - v_s1 = x[31]; + 
EXPECT_NOT_POISONED(x[0]); + EXPECT_NOT_POISONED(x[16]); + EXPECT_NOT_POISONED(x[31]); fclose(f); delete x; } @@ -508,9 +546,9 @@ TEST(MemorySanitizer, read) { assert(fd > 0); int sz = read(fd, x, 32); assert(sz == 32); - v_s1 = x[0]; - v_s1 = x[16]; - v_s1 = x[31]; + EXPECT_NOT_POISONED(x[0]); + EXPECT_NOT_POISONED(x[16]); + EXPECT_NOT_POISONED(x[31]); close(fd); delete x; } @@ -521,9 +559,9 @@ TEST(MemorySanitizer, pread) { assert(fd > 0); int sz = pread(fd, x, 32, 0); assert(sz == 32); - v_s1 = x[0]; - v_s1 = x[16]; - v_s1 = x[31]; + EXPECT_NOT_POISONED(x[0]); + EXPECT_NOT_POISONED(x[16]); + EXPECT_NOT_POISONED(x[31]); close(fd); delete x; } @@ -532,13 +570,13 @@ TEST(MemorySanitizer, pread) { TEST(MemorySanitizer, DISABLED_ioctl) { struct winsize ws; EXPECT_EQ(ioctl(2, TIOCGWINSZ, &ws), 0); - v_s4 = ws.ws_col; + EXPECT_NOT_POISONED(ws.ws_col); } TEST(MemorySanitizer, readlink) { char *x = new char[1000]; readlink("/proc/self/exe", x, 1000); - v_s1 = x[0]; + EXPECT_NOT_POISONED(x[0]); delete [] x; } @@ -547,35 +585,205 @@ TEST(MemorySanitizer, stat) { struct stat* st = new struct stat; int res = stat("/proc/self/stat", st); assert(!res); - v_u8 = st->st_dev; - v_u8 = st->st_mode; - v_u8 = st->st_size; + EXPECT_NOT_POISONED(st->st_dev); + EXPECT_NOT_POISONED(st->st_mode); + EXPECT_NOT_POISONED(st->st_size); } TEST(MemorySanitizer, statfs) { struct statfs* st = new struct statfs; int res = statfs("/", st); assert(!res); - v_u8 = st->f_type; - v_u8 = st->f_bfree; - v_u8 = st->f_namelen; + EXPECT_NOT_POISONED(st->f_type); + EXPECT_NOT_POISONED(st->f_bfree); + EXPECT_NOT_POISONED(st->f_namelen); } TEST(MemorySanitizer, pipe) { int* pipefd = new int[2]; int res = pipe(pipefd); assert(!res); - v_u8 = pipefd[0]; - v_u8 = pipefd[1]; + EXPECT_NOT_POISONED(pipefd[0]); + EXPECT_NOT_POISONED(pipefd[1]); close(pipefd[0]); close(pipefd[1]); } +TEST(MemorySanitizer, pipe2) { + int* pipefd = new int[2]; + int res = pipe2(pipefd, O_NONBLOCK); + assert(!res); + EXPECT_NOT_POISONED(pipefd[0]); + EXPECT_NOT_POISONED(pipefd[1]); + close(pipefd[0]); + close(pipefd[1]); +} + +TEST(MemorySanitizer, socketpair) { + int sv[2]; + int res = socketpair(AF_UNIX, SOCK_STREAM, 0, sv); + assert(!res); + EXPECT_NOT_POISONED(sv[0]); + EXPECT_NOT_POISONED(sv[1]); + close(sv[0]); + close(sv[1]); +} + +TEST(MemorySanitizer, bind_getsockname) { + int sock = socket(AF_UNIX, SOCK_STREAM, 0); + + struct sockaddr_in sai; + memset(&sai, 0, sizeof(sai)); + sai.sin_family = AF_UNIX; + int res = bind(sock, (struct sockaddr *)&sai, sizeof(sai)); + + assert(!res); + char buf[200]; + socklen_t addrlen; + EXPECT_UMR(getsockname(sock, (struct sockaddr *)&buf, &addrlen)); + + addrlen = sizeof(buf); + res = getsockname(sock, (struct sockaddr *)&buf, &addrlen); + EXPECT_NOT_POISONED(addrlen); + EXPECT_NOT_POISONED(buf[0]); + EXPECT_NOT_POISONED(buf[addrlen - 1]); + EXPECT_POISONED(buf[addrlen]); + close(sock); +} + +#define EXPECT_HOSTENT_NOT_POISONED(he) \ + do { \ + EXPECT_NOT_POISONED(*(he)); \ + ASSERT_NE((void *) 0, (he)->h_name); \ + ASSERT_NE((void *) 0, (he)->h_aliases); \ + ASSERT_NE((void *) 0, (he)->h_addr_list); \ + EXPECT_NOT_POISONED(strlen((he)->h_name)); \ + char **p = (he)->h_aliases; \ + while (*p) { \ + EXPECT_NOT_POISONED(strlen(*p)); \ + ++p; \ + } \ + char **q = (he)->h_addr_list; \ + while (*q) { \ + EXPECT_NOT_POISONED(*q[0]); \ + ++q; \ + } \ + EXPECT_NOT_POISONED(*q); \ + } while (0) + +TEST(MemorySanitizer, gethostent) { + struct hostent *he = gethostent(); + ASSERT_NE((void *)NULL, he); + 
EXPECT_HOSTENT_NOT_POISONED(he); +} + +TEST(MemorySanitizer, gethostbyname) { + struct hostent *he = gethostbyname("localhost"); + ASSERT_NE((void *)NULL, he); + EXPECT_HOSTENT_NOT_POISONED(he); +} + +TEST(MemorySanitizer, gethostbyname2) { + struct hostent *he = gethostbyname2("localhost", AF_INET); + ASSERT_NE((void *)NULL, he); + EXPECT_HOSTENT_NOT_POISONED(he); +} + +TEST(MemorySanitizer, gethostbyaddr) { + in_addr_t addr = inet_addr("127.0.0.1"); + EXPECT_NOT_POISONED(addr); + struct hostent *he = gethostbyaddr(&addr, sizeof(addr), AF_INET); + ASSERT_NE((void *)NULL, he); + EXPECT_HOSTENT_NOT_POISONED(he); +} + +TEST(MemorySanitizer, gethostent_r) { + char buf[2000]; + struct hostent he; + struct hostent *result; + int err; + int res = gethostent_r(&he, buf, sizeof(buf), &result, &err); + ASSERT_EQ(0, res); + EXPECT_NOT_POISONED(result); + ASSERT_NE((void *)NULL, result); + EXPECT_HOSTENT_NOT_POISONED(result); + EXPECT_NOT_POISONED(err); +} + +TEST(MemorySanitizer, gethostbyname_r) { + char buf[2000]; + struct hostent he; + struct hostent *result; + int err; + int res = gethostbyname_r("localhost", &he, buf, sizeof(buf), &result, &err); + ASSERT_EQ(0, res); + EXPECT_NOT_POISONED(result); + ASSERT_NE((void *)NULL, result); + EXPECT_HOSTENT_NOT_POISONED(result); + EXPECT_NOT_POISONED(err); +} + +TEST(MemorySanitizer, gethostbyname2_r) { + char buf[2000]; + struct hostent he; + struct hostent *result; + int err; + int res = gethostbyname2_r("localhost", AF_INET, &he, buf, sizeof(buf), + &result, &err); + ASSERT_EQ(0, res); + EXPECT_NOT_POISONED(result); + ASSERT_NE((void *)NULL, result); + EXPECT_HOSTENT_NOT_POISONED(result); + EXPECT_NOT_POISONED(err); +} + +TEST(MemorySanitizer, gethostbyaddr_r) { + char buf[2000]; + struct hostent he; + struct hostent *result; + int err; + in_addr_t addr = inet_addr("127.0.0.1"); + EXPECT_NOT_POISONED(addr); + int res = gethostbyaddr_r(&addr, sizeof(addr), AF_INET, &he, buf, sizeof(buf), + &result, &err); + ASSERT_EQ(0, res); + EXPECT_NOT_POISONED(result); + ASSERT_NE((void *)NULL, result); + EXPECT_HOSTENT_NOT_POISONED(result); + EXPECT_NOT_POISONED(err); +} + +TEST(MemorySanitizer, getsockopt) { + int sock = socket(AF_UNIX, SOCK_STREAM, 0); + struct linger l[2]; + socklen_t sz = sizeof(l[0]); + int res = getsockopt(sock, SOL_SOCKET, SO_LINGER, &l[0], &sz); + ASSERT_EQ(0, res); + ASSERT_EQ(sizeof(l[0]), sz); + EXPECT_NOT_POISONED(l[0]); + EXPECT_POISONED(*(char *)(l + 1)); +} + TEST(MemorySanitizer, getcwd) { char path[PATH_MAX + 1]; char* res = getcwd(path, sizeof(path)); assert(res); - v_s1 = path[0]; + EXPECT_NOT_POISONED(path[0]); +} + +TEST(MemorySanitizer, getcwd_gnu) { + char* res = getcwd(NULL, 0); + assert(res); + EXPECT_NOT_POISONED(res[0]); + free(res); +} + +TEST(MemorySanitizer, readdir) { + DIR *dir = opendir("."); + struct dirent *d = readdir(dir); + assert(d); + EXPECT_NOT_POISONED(d->d_name[0]); + closedir(dir); } TEST(MemorySanitizer, realpath) { @@ -583,7 +791,7 @@ TEST(MemorySanitizer, realpath) { char path[PATH_MAX + 1]; char* res = realpath(relpath, path); assert(res); - v_s1 = path[0]; + EXPECT_NOT_POISONED(path[0]); } TEST(MemorySanitizer, memcpy) { @@ -592,8 +800,8 @@ TEST(MemorySanitizer, memcpy) { x[0] = 1; x[1] = *GetPoisoned<char>(); memcpy(y, x, 2); - v_s4 = y[0]; - EXPECT_POISONED(v_s4 = y[1]); + EXPECT_NOT_POISONED(y[0]); + EXPECT_POISONED(y[1]); } TEST(MemorySanitizer, memmove) { @@ -602,29 +810,57 @@ TEST(MemorySanitizer, memmove) { x[0] = 1; x[1] = *GetPoisoned<char>(); memmove(y, x, 2); - v_s4 = y[0]; - 
EXPECT_POISONED(v_s4 = y[1]); + EXPECT_NOT_POISONED(y[0]); + EXPECT_POISONED(y[1]); } TEST(MemorySanitizer, strdup) { - char *x = strdup("zzz"); - v_s1 = *x; + char buf[4] = "abc"; + __msan_poison(buf + 2, sizeof(*buf)); + char *x = strdup(buf); + EXPECT_NOT_POISONED(x[0]); + EXPECT_NOT_POISONED(x[1]); + EXPECT_POISONED(x[2]); + EXPECT_NOT_POISONED(x[3]); + free(x); +} + +TEST(MemorySanitizer, strndup) { + char buf[4] = "abc"; + __msan_poison(buf + 2, sizeof(*buf)); + char *x = strndup(buf, 3); + EXPECT_NOT_POISONED(x[0]); + EXPECT_NOT_POISONED(x[1]); + EXPECT_POISONED(x[2]); + EXPECT_NOT_POISONED(x[3]); + free(x); +} + +TEST(MemorySanitizer, strndup_short) { + char buf[4] = "abc"; + __msan_poison(buf + 1, sizeof(*buf)); + __msan_poison(buf + 2, sizeof(*buf)); + char *x = strndup(buf, 2); + EXPECT_NOT_POISONED(x[0]); + EXPECT_POISONED(x[1]); + EXPECT_NOT_POISONED(x[2]); free(x); } + template<class T, int size> void TestOverlapMemmove() { T *x = new T[size]; assert(size >= 3); x[2] = 0; memmove(x, x + 1, (size - 1) * sizeof(T)); - v_s8 = x[1]; + EXPECT_NOT_POISONED(x[1]); if (!__msan_has_dynamic_component()) { // FIXME: under DR we will lose this information // because accesses in memmove will unpoisin the shadow. // We need to use our own memove implementation instead of libc's. - EXPECT_POISONED(v_s8 = x[0]); - EXPECT_POISONED(v_s8 = x[2]); + EXPECT_POISONED(x[0]); + EXPECT_POISONED(x[2]); } delete [] x; } @@ -643,9 +879,9 @@ TEST(MemorySanitizer, strcpy) { // NOLINT x[1] = *GetPoisoned<char>(1, 1); x[2] = 0; strcpy(y, x); // NOLINT - v_s4 = y[0]; - EXPECT_POISONED(v_s4 = y[1]); - v_s4 = y[2]; + EXPECT_NOT_POISONED(y[0]); + EXPECT_POISONED(y[1]); + EXPECT_NOT_POISONED(y[2]); } TEST(MemorySanitizer, strncpy) { // NOLINT @@ -655,57 +891,57 @@ TEST(MemorySanitizer, strncpy) { // NOLINT x[1] = *GetPoisoned<char>(1, 1); x[2] = 0; strncpy(y, x, 2); // NOLINT - v_s4 = y[0]; - EXPECT_POISONED(v_s4 = y[1]); - EXPECT_POISONED(v_s4 = y[2]); + EXPECT_NOT_POISONED(y[0]); + EXPECT_POISONED(y[1]); + EXPECT_POISONED(y[2]); } TEST(MemorySanitizer, strtol) { char *e; assert(1 == strtol("1", &e, 10)); - v_s8 = (S8) e; + EXPECT_NOT_POISONED((S8) e); } TEST(MemorySanitizer, strtoll) { char *e; assert(1 == strtoll("1", &e, 10)); - v_s8 = (S8) e; + EXPECT_NOT_POISONED((S8) e); } TEST(MemorySanitizer, strtoul) { char *e; assert(1 == strtoul("1", &e, 10)); - v_s8 = (S8) e; + EXPECT_NOT_POISONED((S8) e); } TEST(MemorySanitizer, strtoull) { char *e; assert(1 == strtoull("1", &e, 10)); - v_s8 = (S8) e; + EXPECT_NOT_POISONED((S8) e); } TEST(MemorySanitizer, strtod) { char *e; assert(0 != strtod("1.5", &e)); - v_s8 = (S8) e; + EXPECT_NOT_POISONED((S8) e); } TEST(MemorySanitizer, strtof) { char *e; assert(0 != strtof("1.5", &e)); - v_s8 = (S8) e; + EXPECT_NOT_POISONED((S8) e); } TEST(MemorySanitizer, strtold) { char *e; assert(0 != strtold("1.5", &e)); - v_s8 = (S8) e; + EXPECT_NOT_POISONED((S8) e); } TEST(MemorySanitizer, sprintf) { // NOLINT char buff[10]; - __msan_break_optimization(buff); - EXPECT_POISONED(v_s1 = buff[0]); + break_optimization(buff); + EXPECT_POISONED(buff[0]); int res = sprintf(buff, "%d", 1234567); // NOLINT assert(res == 7); assert(buff[0] == '1'); @@ -713,13 +949,13 @@ TEST(MemorySanitizer, sprintf) { // NOLINT assert(buff[2] == '3'); assert(buff[6] == '7'); assert(buff[7] == 0); - EXPECT_POISONED(v_s1 = buff[8]); + EXPECT_POISONED(buff[8]); } TEST(MemorySanitizer, snprintf) { char buff[10]; - __msan_break_optimization(buff); - EXPECT_POISONED(v_s1 = buff[0]); + break_optimization(buff); + 
EXPECT_POISONED(buff[0]); int res = snprintf(buff, sizeof(buff), "%d", 1234567); assert(res == 7); assert(buff[0] == '1'); @@ -727,14 +963,14 @@ TEST(MemorySanitizer, snprintf) { assert(buff[2] == '3'); assert(buff[6] == '7'); assert(buff[7] == 0); - EXPECT_POISONED(v_s1 = buff[8]); + EXPECT_POISONED(buff[8]); } TEST(MemorySanitizer, swprintf) { wchar_t buff[10]; assert(sizeof(wchar_t) == 4); - __msan_break_optimization(buff); - EXPECT_POISONED(v_s1 = buff[0]); + break_optimization(buff); + EXPECT_POISONED(buff[0]); int res = swprintf(buff, 9, L"%d", 1234567); assert(res == 7); assert(buff[0] == '1'); @@ -742,7 +978,7 @@ TEST(MemorySanitizer, swprintf) { assert(buff[2] == '3'); assert(buff[6] == '7'); assert(buff[7] == 0); - EXPECT_POISONED(v_s4 = buff[8]); + EXPECT_POISONED(buff[8]); } TEST(MemorySanitizer, wcstombs) { @@ -758,19 +994,99 @@ TEST(MemorySanitizer, wcstombs) { TEST(MemorySanitizer, gettimeofday) { struct timeval tv; struct timezone tz; - __msan_break_optimization(&tv); - __msan_break_optimization(&tz); + break_optimization(&tv); + break_optimization(&tz); assert(sizeof(tv) == 16); assert(sizeof(tz) == 8); - EXPECT_POISONED(v_s8 = tv.tv_sec); - EXPECT_POISONED(v_s8 = tv.tv_usec); - EXPECT_POISONED(v_s4 = tz.tz_minuteswest); - EXPECT_POISONED(v_s4 = tz.tz_dsttime); + EXPECT_POISONED(tv.tv_sec); + EXPECT_POISONED(tv.tv_usec); + EXPECT_POISONED(tz.tz_minuteswest); + EXPECT_POISONED(tz.tz_dsttime); assert(0 == gettimeofday(&tv, &tz)); - v_s8 = tv.tv_sec; - v_s8 = tv.tv_usec; - v_s4 = tz.tz_minuteswest; - v_s4 = tz.tz_dsttime; + EXPECT_NOT_POISONED(tv.tv_sec); + EXPECT_NOT_POISONED(tv.tv_usec); + EXPECT_NOT_POISONED(tz.tz_minuteswest); + EXPECT_NOT_POISONED(tz.tz_dsttime); +} + +TEST(MemorySanitizer, clock_gettime) { + struct timespec tp; + EXPECT_POISONED(tp.tv_sec); + EXPECT_POISONED(tp.tv_nsec); + assert(0 == clock_gettime(CLOCK_REALTIME, &tp)); + EXPECT_NOT_POISONED(tp.tv_sec); + EXPECT_NOT_POISONED(tp.tv_nsec); +} + +TEST(MemorySanitizer, clock_getres) { + struct timespec tp; + EXPECT_POISONED(tp.tv_sec); + EXPECT_POISONED(tp.tv_nsec); + assert(0 == clock_getres(CLOCK_REALTIME, 0)); + EXPECT_POISONED(tp.tv_sec); + EXPECT_POISONED(tp.tv_nsec); + assert(0 == clock_getres(CLOCK_REALTIME, &tp)); + EXPECT_NOT_POISONED(tp.tv_sec); + EXPECT_NOT_POISONED(tp.tv_nsec); +} + +TEST(MemorySanitizer, getitimer) { + struct itimerval it1, it2; + int res; + EXPECT_POISONED(it1.it_interval.tv_sec); + EXPECT_POISONED(it1.it_interval.tv_usec); + EXPECT_POISONED(it1.it_value.tv_sec); + EXPECT_POISONED(it1.it_value.tv_usec); + res = getitimer(ITIMER_VIRTUAL, &it1); + assert(!res); + EXPECT_NOT_POISONED(it1.it_interval.tv_sec); + EXPECT_NOT_POISONED(it1.it_interval.tv_usec); + EXPECT_NOT_POISONED(it1.it_value.tv_sec); + EXPECT_NOT_POISONED(it1.it_value.tv_usec); + + it1.it_interval.tv_sec = it1.it_value.tv_sec = 10000; + it1.it_interval.tv_usec = it1.it_value.tv_usec = 0; + + res = setitimer(ITIMER_VIRTUAL, &it1, &it2); + assert(!res); + EXPECT_NOT_POISONED(it2.it_interval.tv_sec); + EXPECT_NOT_POISONED(it2.it_interval.tv_usec); + EXPECT_NOT_POISONED(it2.it_value.tv_sec); + EXPECT_NOT_POISONED(it2.it_value.tv_usec); + + // Check that old_value can be 0, and disable the timer. 
+ memset(&it1, 0, sizeof(it1)); + res = setitimer(ITIMER_VIRTUAL, &it1, 0); + assert(!res); +} + +TEST(MemorySanitizer, time) { + time_t t; + EXPECT_POISONED(t); + time_t t2 = time(&t); + assert(t2 != (time_t)-1); + EXPECT_NOT_POISONED(t); +} + +TEST(MemorySanitizer, localtime) { + time_t t = 123; + struct tm *time = localtime(&t); + assert(time != 0); + EXPECT_NOT_POISONED(time->tm_sec); + EXPECT_NOT_POISONED(time->tm_hour); + EXPECT_NOT_POISONED(time->tm_year); + EXPECT_NOT_POISONED(time->tm_isdst); +} + +TEST(MemorySanitizer, localtime_r) { + time_t t = 123; + struct tm time; + struct tm *res = localtime_r(&t, &time); + assert(res != 0); + EXPECT_NOT_POISONED(time.tm_sec); + EXPECT_NOT_POISONED(time.tm_hour); + EXPECT_NOT_POISONED(time.tm_year); + EXPECT_NOT_POISONED(time.tm_isdst); } TEST(MemorySanitizer, mmap) { @@ -787,7 +1103,7 @@ TEST(MemorySanitizer, mmap) { munmap(p2, size); } if (p1 == p2) { - v_s1 = *(char*)p2; + EXPECT_NOT_POISONED(*(char*)p2); munmap(p2, size); } } @@ -796,76 +1112,143 @@ TEST(MemorySanitizer, mmap) { // FIXME: check why msandr does nt handle fcvt. TEST(MemorySanitizer, fcvt) { int a, b; - __msan_break_optimization(&a); - __msan_break_optimization(&b); - EXPECT_POISONED(v_s4 = a); - EXPECT_POISONED(v_s4 = b); + break_optimization(&a); + break_optimization(&b); + EXPECT_POISONED(a); + EXPECT_POISONED(b); char *str = fcvt(12345.6789, 10, &a, &b); - v_s4 = a; - v_s4 = b; + EXPECT_NOT_POISONED(a); + EXPECT_NOT_POISONED(b); } -TEST(MemorySanitizer, LoadUnpoisoned) { - S8 s = *GetPoisoned<S8>(); - EXPECT_POISONED(v_s8 = s); - S8 safe = *GetPoisoned<S8>(); - __msan_load_unpoisoned(&s, sizeof(s), &safe); - v_s8 = safe; +TEST(MemorySanitizer, frexp) { + int x; + x = *GetPoisoned<int>(); + double r = frexp(1.1, &x); + EXPECT_NOT_POISONED(r); + EXPECT_NOT_POISONED(x); + + x = *GetPoisoned<int>(); + float rf = frexpf(1.1, &x); + EXPECT_NOT_POISONED(rf); + EXPECT_NOT_POISONED(x); + + x = *GetPoisoned<int>(); + double rl = frexpl(1.1, &x); + EXPECT_NOT_POISONED(rl); + EXPECT_NOT_POISONED(x); +} + +namespace { + +static int cnt; + +void SigactionHandler(int signo, siginfo_t* si, void* uc) { + assert(signo == SIGPROF); + assert(si); + EXPECT_NOT_POISONED(si->si_errno); + EXPECT_NOT_POISONED(si->si_pid); +#if __linux__ +# if defined(__x86_64__) + EXPECT_NOT_POISONED(((ucontext_t*)uc)->uc_mcontext.gregs[REG_RIP]); +# elif defined(__i386__) + EXPECT_NOT_POISONED(((ucontext_t*)uc)->uc_mcontext.gregs[REG_EIP]); +# endif +#endif + ++cnt; +} + +TEST(MemorySanitizer, sigaction) { + struct sigaction act = {}; + struct sigaction oldact = {}; + struct sigaction origact = {}; + + sigaction(SIGPROF, 0, &origact); + + act.sa_flags |= SA_SIGINFO; + act.sa_sigaction = &SigactionHandler; + sigaction(SIGPROF, &act, 0); + + kill(getpid(), SIGPROF); + + act.sa_flags &= ~SA_SIGINFO; + act.sa_handler = SIG_DFL; + sigaction(SIGPROF, &act, 0); + + act.sa_flags &= ~SA_SIGINFO; + act.sa_handler = SIG_IGN; + sigaction(SIGPROF, &act, &oldact); + EXPECT_FALSE(oldact.sa_flags & SA_SIGINFO); + EXPECT_EQ(SIG_DFL, oldact.sa_handler); + kill(getpid(), SIGPROF); + + act.sa_flags |= SA_SIGINFO; + act.sa_sigaction = &SigactionHandler; + sigaction(SIGPROF, &act, &oldact); + EXPECT_FALSE(oldact.sa_flags & SA_SIGINFO); + EXPECT_EQ(SIG_IGN, oldact.sa_handler); + kill(getpid(), SIGPROF); + + act.sa_flags &= ~SA_SIGINFO; + act.sa_handler = SIG_DFL; + sigaction(SIGPROF, &act, &oldact); + EXPECT_TRUE(oldact.sa_flags & SA_SIGINFO); + EXPECT_EQ(&SigactionHandler, oldact.sa_sigaction); + EXPECT_EQ(2, cnt); + + 
sigaction(SIGPROF, &origact, 0); } +} // namespace + struct StructWithDtor { ~StructWithDtor(); }; NOINLINE StructWithDtor::~StructWithDtor() { - __msan_break_optimization(0); -} - -NOINLINE void ExpectGood(int a) { v_s4 = a; } -NOINLINE void ExpectPoisoned(int a) { - EXPECT_POISONED(v_s4 = a); + break_optimization(0); } TEST(MemorySanitizer, Invoke) { StructWithDtor s; // Will cause the calls to become invokes. - ExpectGood(0); - ExpectPoisoned(*GetPoisoned<int>()); - ExpectGood(0); - ExpectPoisoned(*GetPoisoned<int>()); - EXPECT_POISONED(v_s4 = ReturnPoisoned<S4>()); + EXPECT_NOT_POISONED(0); + EXPECT_POISONED(*GetPoisoned<int>()); + EXPECT_NOT_POISONED(0); + EXPECT_POISONED(*GetPoisoned<int>()); + EXPECT_POISONED(ReturnPoisoned<S4>()); } TEST(MemorySanitizer, ptrtoint) { // Test that shadow is propagated through pointer-to-integer conversion. void* p = (void*)0xABCD; __msan_poison(((char*)&p) + 1, sizeof(p)); - v_u1 = (((uptr)p) & 0xFF) == 0; + EXPECT_NOT_POISONED((((uintptr_t)p) & 0xFF) == 0); void* q = (void*)0xABCD; __msan_poison(&q, sizeof(q) - 1); - EXPECT_POISONED(v_u1 = (((uptr)q) & 0xFF) == 0); + EXPECT_POISONED((((uintptr_t)q) & 0xFF) == 0); } static void vaargsfn2(int guard, ...) { va_list vl; va_start(vl, guard); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_d = va_arg(vl, double)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, double)); va_end(vl); } static void vaargsfn(int guard, ...) { va_list vl; va_start(vl, guard); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); // The following call will overwrite __msan_param_tls. // Checks after it test that arg shadow was somehow saved across the call. vaargsfn2(1, 2, 3, 4, *GetPoisoned<double>()); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); va_end(vl); } @@ -878,16 +1261,16 @@ TEST(MemorySanitizer, VAArgTest) { static void vaargsfn_many(int guard, ...) { va_list vl; va_start(vl, guard); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); va_end(vl); } @@ -898,15 +1281,15 @@ TEST(MemorySanitizer, VAArgManyTest) { } static void vaargsfn_pass2(va_list vl) { - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); } static void vaargsfn_pass(int guard, ...) 
{ va_list vl; va_start(vl, guard); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); vaargsfn_pass2(vl); va_end(vl); } @@ -918,20 +1301,20 @@ TEST(MemorySanitizer, VAArgPass) { } static void vaargsfn_copy2(va_list vl) { - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); } static void vaargsfn_copy(int guard, ...) { va_list vl; va_start(vl, guard); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); va_list vl2; va_copy(vl2, vl); vaargsfn_copy2(vl2); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); va_end(vl); } @@ -944,10 +1327,10 @@ TEST(MemorySanitizer, VAArgCopy) { static void vaargsfn_ptr(int guard, ...) { va_list vl; va_start(vl, guard); - v_p = va_arg(vl, int*); - EXPECT_POISONED(v_p = va_arg(vl, int*)); - v_p = va_arg(vl, int*); - EXPECT_POISONED(v_p = va_arg(vl, double*)); + EXPECT_NOT_POISONED(va_arg(vl, int*)); + EXPECT_POISONED(va_arg(vl, int*)); + EXPECT_NOT_POISONED(va_arg(vl, int*)); + EXPECT_POISONED(va_arg(vl, double*)); va_end(vl); } @@ -961,33 +1344,33 @@ TEST(MemorySanitizer, VAArgPtr) { static void vaargsfn_overflow(int guard, ...) { va_list vl; va_start(vl, guard); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - v_s4 = va_arg(vl, int); - - v_d = va_arg(vl, double); - v_d = va_arg(vl, double); - v_d = va_arg(vl, double); - EXPECT_POISONED(v_d = va_arg(vl, double)); - v_d = va_arg(vl, double); - EXPECT_POISONED(v_p = va_arg(vl, int*)); - v_d = va_arg(vl, double); - v_d = va_arg(vl, double); - - EXPECT_POISONED(v_s4 = va_arg(vl, int)); - EXPECT_POISONED(v_d = va_arg(vl, double)); - EXPECT_POISONED(v_p = va_arg(vl, int*)); - - v_s4 = va_arg(vl, int); - v_d = va_arg(vl, double); - v_p = va_arg(vl, int*); - - EXPECT_POISONED(v_s4 = va_arg(vl, int)); - EXPECT_POISONED(v_d = va_arg(vl, double)); - EXPECT_POISONED(v_p = va_arg(vl, int*)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, int)); + + EXPECT_NOT_POISONED(va_arg(vl, double)); + EXPECT_NOT_POISONED(va_arg(vl, double)); + EXPECT_NOT_POISONED(va_arg(vl, double)); + EXPECT_POISONED(va_arg(vl, double)); + EXPECT_NOT_POISONED(va_arg(vl, double)); + EXPECT_POISONED(va_arg(vl, int*)); + EXPECT_NOT_POISONED(va_arg(vl, double)); + EXPECT_NOT_POISONED(va_arg(vl, double)); + + EXPECT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, double)); + EXPECT_POISONED(va_arg(vl, int*)); + + EXPECT_NOT_POISONED(va_arg(vl, int)); + EXPECT_NOT_POISONED(va_arg(vl, double)); + EXPECT_NOT_POISONED(va_arg(vl, int*)); + + EXPECT_POISONED(va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, double)); + EXPECT_POISONED(va_arg(vl, int*)); va_end(vl); } @@ -1009,7 +1392,7 @@ TEST(MemorySanitizer, VAArgOverflow) { static void vaargsfn_tlsoverwrite2(int guard, ...) { va_list vl; va_start(vl, guard); - v_s4 = va_arg(vl, int); + EXPECT_NOT_POISONED(va_arg(vl, int)); va_end(vl); } @@ -1018,7 +1401,7 @@ static void vaargsfn_tlsoverwrite(int guard, ...) 
{ vaargsfn_tlsoverwrite2(2, 42); va_list vl; va_start(vl, guard); - EXPECT_POISONED(v_s4 = va_arg(vl, int)); + EXPECT_POISONED(va_arg(vl, int)); va_end(vl); } @@ -1032,12 +1415,12 @@ struct StructByVal { }; NOINLINE void StructByValTestFunc(struct StructByVal s) { - v_s4 = s.a; - EXPECT_POISONED(v_s4 = s.b); - v_s4 = s.c; - EXPECT_POISONED(v_s4 = s.d); - v_s4 = s.e; - EXPECT_POISONED(v_s4 = s.f); + EXPECT_NOT_POISONED(s.a); + EXPECT_POISONED(s.b); + EXPECT_NOT_POISONED(s.c); + EXPECT_POISONED(s.d); + EXPECT_NOT_POISONED(s.e); + EXPECT_POISONED(s.f); } NOINLINE void StructByValTestFunc1(struct StructByVal s) { @@ -1064,13 +1447,13 @@ TEST(MemorySanitizer, StructByVal) { #if MSAN_HAS_M128 -NOINLINE __m128i m128Eq(__m128i *a, __m128i *b) { return *a == *b; } -NOINLINE __m128i m128Lt(__m128i *a, __m128i *b) { return *a < *b; } +NOINLINE __m128i m128Eq(__m128i *a, __m128i *b) { return _mm_cmpeq_epi16(*a, *b); } +NOINLINE __m128i m128Lt(__m128i *a, __m128i *b) { return _mm_cmplt_epi16(*a, *b); } TEST(MemorySanitizer, m128) { __m128i a = _mm_set1_epi16(0x1234); __m128i b = _mm_set1_epi16(0x7890); - v_m128 = m128Eq(&a, &b); - v_m128 = m128Lt(&a, &b); + EXPECT_NOT_POISONED(m128Eq(&a, &b)); + EXPECT_NOT_POISONED(m128Lt(&a, &b)); } // FIXME: add more tests for __m128i. #endif // MSAN_HAS_M128 @@ -1092,7 +1475,7 @@ NOINLINE StructWithHole ReturnStructWithHole() { TEST(MemorySanitizer, StructWithHole) { StructWithHole a = ReturnStructWithHole(); - __msan_break_optimization(&a); + break_optimization(&a); } template <class T> @@ -1106,8 +1489,8 @@ NOINLINE T ReturnStruct() { template <class T> NOINLINE void TestReturnStruct() { T s1 = ReturnStruct<T>(); - v_s4 = s1.a; - EXPECT_POISONED(v_s4 = s1.b); + EXPECT_NOT_POISONED(s1.a); + EXPECT_POISONED(s1.b); } struct SSS1 { @@ -1177,27 +1560,27 @@ NOINLINE LongStruct ReturnLongStruct2() { TEST(MemorySanitizer, LongStruct) { LongStruct s1 = ReturnLongStruct1(); __msan_print_shadow(&s1, sizeof(s1)); - v_u1 = s1.a1; - v_u2 = s1.a2; - v_u4 = s1.a4; - v_u8 = s1.a8; + EXPECT_NOT_POISONED(s1.a1); + EXPECT_NOT_POISONED(s1.a2); + EXPECT_NOT_POISONED(s1.a4); + EXPECT_NOT_POISONED(s1.a8); - EXPECT_POISONED(v_u1 = s1.b1); - EXPECT_POISONED(v_u2 = s1.b2); - EXPECT_POISONED(v_u4 = s1.b4); - EXPECT_POISONED(v_u8 = s1.b8); + EXPECT_POISONED(s1.b1); + EXPECT_POISONED(s1.b2); + EXPECT_POISONED(s1.b4); + EXPECT_POISONED(s1.b8); LongStruct s2 = ReturnLongStruct2(); __msan_print_shadow(&s2, sizeof(s2)); - v_u1 = s2.b1; - v_u2 = s2.b2; - v_u4 = s2.b4; - v_u8 = s2.b8; + EXPECT_NOT_POISONED(s2.b1); + EXPECT_NOT_POISONED(s2.b2); + EXPECT_NOT_POISONED(s2.b4); + EXPECT_NOT_POISONED(s2.b8); - EXPECT_POISONED(v_u1 = s2.a1); - EXPECT_POISONED(v_u2 = s2.a2); - EXPECT_POISONED(v_u4 = s2.a4); - EXPECT_POISONED(v_u8 = s2.a8); + EXPECT_POISONED(s2.a1); + EXPECT_POISONED(s2.a2); + EXPECT_POISONED(s2.a4); + EXPECT_POISONED(s2.a8); } TEST(MemorySanitizer, getrlimit) { @@ -1205,9 +1588,8 @@ TEST(MemorySanitizer, getrlimit) { __msan_poison(&limit, sizeof(limit)); int result = getrlimit(RLIMIT_DATA, &limit); assert(result == 0); - volatile rlim_t t; - t = limit.rlim_cur; - t = limit.rlim_max; + EXPECT_NOT_POISONED(limit.rlim_cur); + EXPECT_NOT_POISONED(limit.rlim_max); } TEST(MemorySanitizer, getrusage) { @@ -1215,18 +1597,35 @@ TEST(MemorySanitizer, getrusage) { __msan_poison(&usage, sizeof(usage)); int result = getrusage(RUSAGE_SELF, &usage); assert(result == 0); - volatile struct timeval t; - v_u8 = usage.ru_utime.tv_sec; - v_u8 = usage.ru_utime.tv_usec; - v_u8 = usage.ru_stime.tv_sec; - v_u8 = 
usage.ru_stime.tv_usec; - v_s8 = usage.ru_maxrss; - v_s8 = usage.ru_minflt; - v_s8 = usage.ru_majflt; - v_s8 = usage.ru_inblock; - v_s8 = usage.ru_oublock; - v_s8 = usage.ru_nvcsw; - v_s8 = usage.ru_nivcsw; + EXPECT_NOT_POISONED(usage.ru_utime.tv_sec); + EXPECT_NOT_POISONED(usage.ru_utime.tv_usec); + EXPECT_NOT_POISONED(usage.ru_stime.tv_sec); + EXPECT_NOT_POISONED(usage.ru_stime.tv_usec); + EXPECT_NOT_POISONED(usage.ru_maxrss); + EXPECT_NOT_POISONED(usage.ru_minflt); + EXPECT_NOT_POISONED(usage.ru_majflt); + EXPECT_NOT_POISONED(usage.ru_inblock); + EXPECT_NOT_POISONED(usage.ru_oublock); + EXPECT_NOT_POISONED(usage.ru_nvcsw); + EXPECT_NOT_POISONED(usage.ru_nivcsw); +} + +#ifdef __GLIBC__ +extern char *program_invocation_name; +#else // __GLIBC__ +# error "TODO: port this" +#endif + +// Compute the path to our loadable DSO. We assume it's in the same +// directory. Only use string routines that we intercept so far to do this. +static int PathToLoadable(char *buf, size_t sz) { + const char *basename = "libmsan_loadable.x86_64.so"; + char *argv0 = program_invocation_name; + char *last_slash = strrchr(argv0, '/'); + assert(last_slash); + int res = + snprintf(buf, sz, "%.*s/%s", int(last_slash - argv0), argv0, basename); + return res < sz ? 0 : res; } static void dladdr_testfn() {} @@ -1236,14 +1635,75 @@ TEST(MemorySanitizer, dladdr) { __msan_poison(&info, sizeof(info)); int result = dladdr((const void*)dladdr_testfn, &info); assert(result != 0); - v_u8 = (unsigned long)info.dli_fname; + EXPECT_NOT_POISONED((unsigned long)info.dli_fname); if (info.dli_fname) - v_u8 = strlen(info.dli_fname); - v_u8 = (unsigned long)info.dli_fbase; - v_u8 = (unsigned long)info.dli_sname; + EXPECT_NOT_POISONED(strlen(info.dli_fname)); + EXPECT_NOT_POISONED((unsigned long)info.dli_fbase); + EXPECT_NOT_POISONED((unsigned long)info.dli_sname); if (info.dli_sname) - v_u8 = strlen(info.dli_sname); - v_u8 = (unsigned long)info.dli_saddr; + EXPECT_NOT_POISONED(strlen(info.dli_sname)); + EXPECT_NOT_POISONED((unsigned long)info.dli_saddr); +} + +static int dl_phdr_callback(struct dl_phdr_info *info, size_t size, void *data) { + (*(int *)data)++; + EXPECT_NOT_POISONED(info->dlpi_addr); + EXPECT_NOT_POISONED(strlen(info->dlpi_name)); + EXPECT_NOT_POISONED(info->dlpi_phnum); + for (int i = 0; i < info->dlpi_phnum; ++i) + EXPECT_NOT_POISONED(info->dlpi_phdr[i]); + return 0; +} + +TEST(MemorySanitizer, dl_iterate_phdr) { + char path[4096]; + int res = PathToLoadable(path, sizeof(path)); + assert(!res); + + // Having at least one dlopen'ed library in the process makes this more + // entertaining. + void *lib = dlopen(path, RTLD_LAZY); + ASSERT_NE((void*)0, lib); + + int count = 0; + int result = dl_iterate_phdr(dl_phdr_callback, &count); + assert(count > 0); + + dlclose(lib); +} + + +TEST(MemorySanitizer, dlopen) { + char path[4096]; + int res = PathToLoadable(path, sizeof(path)); + assert(!res); + + // We need to clear shadow for globals when doing dlopen. In order to test + // this, we have to poison the shadow for the DSO before we load it. In + // general this is difficult, but the loader tends to reload things in the + // same place, so we open, close, and then reopen. The global should always + // start out clean after dlopen. 
+ for (int i = 0; i < 2; i++) { + void *lib = dlopen(path, RTLD_LAZY); + if (lib == NULL) { + printf("dlerror: %s\n", dlerror()); + assert(lib != NULL); + } + void **(*get_dso_global)() = (void **(*)())dlsym(lib, "get_dso_global"); + assert(get_dso_global); + void **dso_global = get_dso_global(); + EXPECT_NOT_POISONED(*dso_global); + __msan_poison(dso_global, sizeof(*dso_global)); + EXPECT_POISONED(*dso_global); + dlclose(lib); + } +} + +// Regression test for a crash in dlopen() interceptor. +TEST(MemorySanitizer, dlopenFailed) { + const char *path = "/libmsan_loadable_does_not_exist.x86_64.so"; + void *lib = dlopen(path, RTLD_LAZY); + ASSERT_EQ(0, lib); } TEST(MemorySanitizer, scanf) { @@ -1253,27 +1713,28 @@ TEST(MemorySanitizer, scanf) { int res = sscanf(input, "%d %5s", d, s); printf("res %d\n", res); assert(res == 2); - v_s4 = *d; - v_u1 = s[0]; - v_u1 = s[1]; - v_u1 = s[2]; - v_u1 = s[3]; - v_u1 = s[4]; - v_u1 = s[5]; - EXPECT_POISONED(v_u1 = s[6]); + EXPECT_NOT_POISONED(*d); + EXPECT_NOT_POISONED(s[0]); + EXPECT_NOT_POISONED(s[1]); + EXPECT_NOT_POISONED(s[2]); + EXPECT_NOT_POISONED(s[3]); + EXPECT_NOT_POISONED(s[4]); + EXPECT_NOT_POISONED(s[5]); + EXPECT_POISONED(s[6]); delete s; delete d; } -static void* SimpleThread_threadfn(void* data) { +static void *SimpleThread_threadfn(void* data) { return new int; } TEST(MemorySanitizer, SimpleThread) { pthread_t t; - void* p; + void *p; int res = pthread_create(&t, NULL, SimpleThread_threadfn, NULL); assert(!res); + EXPECT_NOT_POISONED(t); res = pthread_join(t, &p); assert(!res); if (!__msan_has_dynamic_component()) // FIXME: intercept pthread_join (?). @@ -1281,56 +1742,256 @@ TEST(MemorySanitizer, SimpleThread) { delete (int*)p; } +static void *SmallStackThread_threadfn(void* data) { + return 0; +} + +TEST(MemorySanitizer, SmallStackThread) { + pthread_attr_t attr; + pthread_t t; + void *p; + int res; + res = pthread_attr_init(&attr); + ASSERT_EQ(0, res); + res = pthread_attr_setstacksize(&attr, 64 * 1024); + ASSERT_EQ(0, res); + res = pthread_create(&t, &attr, SmallStackThread_threadfn, NULL); + ASSERT_EQ(0, res); + res = pthread_join(t, &p); + ASSERT_EQ(0, res); + res = pthread_attr_destroy(&attr); + ASSERT_EQ(0, res); +} + +TEST(MemorySanitizer, PreAllocatedStackThread) { + pthread_attr_t attr; + pthread_t t; + int res; + res = pthread_attr_init(&attr); + ASSERT_EQ(0, res); + void *stack; + const size_t kStackSize = 64 * 1024; + res = posix_memalign(&stack, 4096, kStackSize); + ASSERT_EQ(0, res); + res = pthread_attr_setstack(&attr, stack, kStackSize); + ASSERT_EQ(0, res); + // A small self-allocated stack can not be extended by the tool. + // In this case pthread_create is expected to fail. 
+ res = pthread_create(&t, &attr, SmallStackThread_threadfn, NULL); + EXPECT_NE(0, res); + res = pthread_attr_destroy(&attr); + ASSERT_EQ(0, res); +} + +TEST(MemorySanitizer, pthread_getschedparam) { + int policy; + struct sched_param param; + int res = pthread_getschedparam(pthread_self(), &policy, ¶m); + ASSERT_EQ(0, res); + EXPECT_NOT_POISONED(policy); + EXPECT_NOT_POISONED(param.sched_priority); +} + +TEST(MemorySanitizer, posix_memalign) { + void *p; + EXPECT_POISONED(p); + int res = posix_memalign(&p, 4096, 13); + ASSERT_EQ(0, res); + EXPECT_NOT_POISONED(p); + free(p); +} + +TEST(MemorySanitizer, inet_pton) { + const char *s = "1:0:0:0:0:0:0:8"; + unsigned char buf[sizeof(struct in6_addr)]; + int res = inet_pton(AF_INET6, s, buf); + ASSERT_EQ(1, res); + EXPECT_NOT_POISONED(buf[0]); + EXPECT_NOT_POISONED(buf[sizeof(struct in6_addr) - 1]); + + char s_out[INET6_ADDRSTRLEN]; + EXPECT_POISONED(s_out[3]); + const char *q = inet_ntop(AF_INET6, buf, s_out, INET6_ADDRSTRLEN); + ASSERT_NE((void*)0, q); + EXPECT_NOT_POISONED(s_out[3]); +} + TEST(MemorySanitizer, uname) { struct utsname u; int res = uname(&u); assert(!res); - v_u8 = strlen(u.sysname); - v_u8 = strlen(u.nodename); - v_u8 = strlen(u.release); - v_u8 = strlen(u.version); - v_u8 = strlen(u.machine); + EXPECT_NOT_POISONED(strlen(u.sysname)); + EXPECT_NOT_POISONED(strlen(u.nodename)); + EXPECT_NOT_POISONED(strlen(u.release)); + EXPECT_NOT_POISONED(strlen(u.version)); + EXPECT_NOT_POISONED(strlen(u.machine)); +} + +TEST(MemorySanitizer, gethostname) { + char buf[100]; + int res = gethostname(buf, 100); + assert(!res); + EXPECT_NOT_POISONED(strlen(buf)); +} + +TEST(MemorySanitizer, getpwuid) { + struct passwd *p = getpwuid(0); // root + assert(p); + EXPECT_NOT_POISONED(p->pw_name); + assert(p->pw_name); + EXPECT_NOT_POISONED(p->pw_name[0]); + EXPECT_NOT_POISONED(p->pw_uid); + assert(p->pw_uid == 0); +} + +TEST(MemorySanitizer, getpwnam_r) { + struct passwd pwd; + struct passwd *pwdres; + char buf[10000]; + int res = getpwnam_r("root", &pwd, buf, sizeof(buf), &pwdres); + assert(!res); + EXPECT_NOT_POISONED(pwd.pw_name); + assert(pwd.pw_name); + EXPECT_NOT_POISONED(pwd.pw_name[0]); + EXPECT_NOT_POISONED(pwd.pw_uid); + assert(pwd.pw_uid == 0); +} + +TEST(MemorySanitizer, getpwnam_r_positive) { + struct passwd pwd; + struct passwd *pwdres; + char s[5]; + strncpy(s, "abcd", 5); + __msan_poison(s, 5); + char buf[10000]; + int res; + EXPECT_UMR(res = getpwnam_r(s, &pwd, buf, sizeof(buf), &pwdres)); +} + +TEST(MemorySanitizer, getgrnam_r) { + struct group grp; + struct group *grpres; + char buf[10000]; + int res = getgrnam_r("root", &grp, buf, sizeof(buf), &grpres); + assert(!res); + EXPECT_NOT_POISONED(grp.gr_name); + assert(grp.gr_name); + EXPECT_NOT_POISONED(grp.gr_name[0]); + EXPECT_NOT_POISONED(grp.gr_gid); } template<class T> -static void testSlt(T value, T shadow) { +static bool applySlt(T value, T shadow) { __msan_partial_poison(&value, &shadow, sizeof(T)); volatile bool zzz = true; // This "|| zzz" trick somehow makes LLVM emit "icmp slt" instead of // a shift-and-trunc to get at the highest bit. 
- volatile bool v_T = value < 0 || zzz; + volatile bool v = value < 0 || zzz; + return v; } TEST(MemorySanitizer, SignedCompareWithZero) { - testSlt<S4>(0xF, 0xF); - testSlt<S4>(0xF, 0xFF); - testSlt<S4>(0xF, 0xFFFFFF); - testSlt<S4>(0xF, 0x7FFFFFF); - EXPECT_POISONED(testSlt<S4>(0xF, 0x80FFFFFF)); - EXPECT_POISONED(testSlt<S4>(0xF, 0xFFFFFFFF)); + EXPECT_NOT_POISONED(applySlt<S4>(0xF, 0xF)); + EXPECT_NOT_POISONED(applySlt<S4>(0xF, 0xFF)); + EXPECT_NOT_POISONED(applySlt<S4>(0xF, 0xFFFFFF)); + EXPECT_NOT_POISONED(applySlt<S4>(0xF, 0x7FFFFFF)); + EXPECT_UMR(applySlt<S4>(0xF, 0x80FFFFFF)); + EXPECT_UMR(applySlt<S4>(0xF, 0xFFFFFFFF)); +} + +template <class T, class S> +static T poisoned(T Va, S Sa) { + char SIZE_CHECK1[(ssize_t)sizeof(T) - (ssize_t)sizeof(S)]; + char SIZE_CHECK2[(ssize_t)sizeof(S) - (ssize_t)sizeof(T)]; + T a; + a = Va; + __msan_partial_poison(&a, &Sa, sizeof(T)); + return a; +} + +TEST(MemorySanitizer, ICmpRelational) { + EXPECT_NOT_POISONED(poisoned(0, 0) < poisoned(0, 0)); + EXPECT_NOT_POISONED(poisoned(0U, 0) < poisoned(0U, 0)); + EXPECT_NOT_POISONED(poisoned(0LL, 0LLU) < poisoned(0LL, 0LLU)); + EXPECT_NOT_POISONED(poisoned(0LLU, 0LLU) < poisoned(0LLU, 0LLU)); + EXPECT_POISONED(poisoned(0xFF, 0xFF) < poisoned(0xFF, 0xFF)); + EXPECT_POISONED(poisoned(0xFFFFFFFFU, 0xFFFFFFFFU) < + poisoned(0xFFFFFFFFU, 0xFFFFFFFFU)); + EXPECT_POISONED(poisoned(-1, 0xFFFFFFFFU) < + poisoned(-1, 0xFFFFFFFFU)); + + EXPECT_NOT_POISONED(poisoned(0, 0) <= poisoned(0, 0)); + EXPECT_NOT_POISONED(poisoned(0U, 0) <= poisoned(0U, 0)); + EXPECT_NOT_POISONED(poisoned(0LL, 0LLU) <= poisoned(0LL, 0LLU)); + EXPECT_NOT_POISONED(poisoned(0LLU, 0LLU) <= poisoned(0LLU, 0LLU)); + EXPECT_POISONED(poisoned(0xFF, 0xFF) <= poisoned(0xFF, 0xFF)); + EXPECT_POISONED(poisoned(0xFFFFFFFFU, 0xFFFFFFFFU) <= + poisoned(0xFFFFFFFFU, 0xFFFFFFFFU)); + EXPECT_POISONED(poisoned(-1, 0xFFFFFFFFU) <= + poisoned(-1, 0xFFFFFFFFU)); + + EXPECT_NOT_POISONED(poisoned(0, 0) > poisoned(0, 0)); + EXPECT_NOT_POISONED(poisoned(0U, 0) > poisoned(0U, 0)); + EXPECT_NOT_POISONED(poisoned(0LL, 0LLU) > poisoned(0LL, 0LLU)); + EXPECT_NOT_POISONED(poisoned(0LLU, 0LLU) > poisoned(0LLU, 0LLU)); + EXPECT_POISONED(poisoned(0xFF, 0xFF) > poisoned(0xFF, 0xFF)); + EXPECT_POISONED(poisoned(0xFFFFFFFFU, 0xFFFFFFFFU) > + poisoned(0xFFFFFFFFU, 0xFFFFFFFFU)); + EXPECT_POISONED(poisoned(-1, 0xFFFFFFFFU) > + poisoned(-1, 0xFFFFFFFFU)); + + EXPECT_NOT_POISONED(poisoned(0, 0) >= poisoned(0, 0)); + EXPECT_NOT_POISONED(poisoned(0U, 0) >= poisoned(0U, 0)); + EXPECT_NOT_POISONED(poisoned(0LL, 0LLU) >= poisoned(0LL, 0LLU)); + EXPECT_NOT_POISONED(poisoned(0LLU, 0LLU) >= poisoned(0LLU, 0LLU)); + EXPECT_POISONED(poisoned(0xFF, 0xFF) >= poisoned(0xFF, 0xFF)); + EXPECT_POISONED(poisoned(0xFFFFFFFFU, 0xFFFFFFFFU) >= + poisoned(0xFFFFFFFFU, 0xFFFFFFFFU)); + EXPECT_POISONED(poisoned(-1, 0xFFFFFFFFU) >= + poisoned(-1, 0xFFFFFFFFU)); + + EXPECT_POISONED(poisoned(6, 0xF) > poisoned(7, 0)); + EXPECT_POISONED(poisoned(0xF, 0xF) > poisoned(7, 0)); + + EXPECT_NOT_POISONED(poisoned(-1, 0x80000000U) >= poisoned(-1, 0U)); } -extern "C" { -NOINLINE void ZZZZZZZZZZZZZZ() { - __msan_break_optimization(0); - - // v_s1 = ReturnPoisoned<S1>(); - // a_s8[g_zero] = *GetPoisoned<S8>() - 1; - // v_s4 = a_s4[g_zero]; - __msan_break_optimization(0); -} +#if MSAN_HAS_M128 +TEST(MemorySanitizer, ICmpVectorRelational) { + EXPECT_NOT_POISONED( + _mm_cmplt_epi16(poisoned(_mm_set1_epi16(0), _mm_set1_epi16(0)), + poisoned(_mm_set1_epi16(0), _mm_set1_epi16(0)))); + EXPECT_NOT_POISONED( + 
_mm_cmplt_epi16(poisoned(_mm_set1_epi32(0), _mm_set1_epi32(0)), + poisoned(_mm_set1_epi32(0), _mm_set1_epi32(0)))); + EXPECT_POISONED( + _mm_cmplt_epi16(poisoned(_mm_set1_epi16(0), _mm_set1_epi16(0xFFFF)), + poisoned(_mm_set1_epi16(0), _mm_set1_epi16(0xFFFF)))); + EXPECT_POISONED(_mm_cmpgt_epi16(poisoned(_mm_set1_epi16(6), _mm_set1_epi16(0xF)), + poisoned(_mm_set1_epi16(7), _mm_set1_epi16(0)))); } +#endif + +// Volatile bitfield store is implemented as load-mask-store +// Test that we don't warn on the store of (uninitialized) padding. +struct VolatileBitfieldStruct { + volatile unsigned x : 1; + unsigned y : 1; +}; -TEST(MemorySanitizer, ZZZTest) { - ZZZZZZZZZZZZZZ(); +TEST(MemorySanitizer, VolatileBitfield) { + VolatileBitfieldStruct *S = new VolatileBitfieldStruct; + S->x = 1; + EXPECT_NOT_POISONED((unsigned)S->x); + EXPECT_POISONED((unsigned)S->y); } TEST(MemorySanitizerDr, StoreInDSOTest) { if (!__msan_has_dynamic_component()) return; char* s = new char[10]; dso_memfill(s, 9); - v_s1 = s[5]; - EXPECT_POISONED(v_s1 = s[9]); + EXPECT_NOT_POISONED(s[5]); + EXPECT_POISONED(s[9]); } int return_poisoned_int() { @@ -1339,20 +2000,20 @@ int return_poisoned_int() { TEST(MemorySanitizerDr, ReturnFromDSOTest) { if (!__msan_has_dynamic_component()) return; - v_u8 = dso_callfn(return_poisoned_int); + EXPECT_NOT_POISONED(dso_callfn(return_poisoned_int)); } NOINLINE int TrashParamTLS(long long x, long long y, long long z) { //NOLINT - EXPECT_POISONED(v_s8 = x); - EXPECT_POISONED(v_s8 = y); - EXPECT_POISONED(v_s8 = z); + EXPECT_POISONED(x); + EXPECT_POISONED(y); + EXPECT_POISONED(z); return 0; } static int CheckParamTLS(long long x, long long y, long long z) { //NOLINT - v_s8 = x; - v_s8 = y; - v_s8 = z; + EXPECT_NOT_POISONED(x); + EXPECT_NOT_POISONED(y); + EXPECT_NOT_POISONED(z); return 0; } @@ -1361,13 +2022,13 @@ TEST(MemorySanitizerDr, CallFromDSOTest) { S8* x = GetPoisoned<S8>(); S8* y = GetPoisoned<S8>(); S8* z = GetPoisoned<S8>(); - v_s4 = TrashParamTLS(*x, *y, *z); - v_u8 = dso_callfn1(CheckParamTLS); + EXPECT_NOT_POISONED(TrashParamTLS(*x, *y, *z)); + EXPECT_NOT_POISONED(dso_callfn1(CheckParamTLS)); } static void StackStoreInDSOFn(int* x, int* y) { - v_s4 = *x; - v_s4 = *y; + EXPECT_NOT_POISONED(*x); + EXPECT_NOT_POISONED(*y); } TEST(MemorySanitizerDr, StackStoreInDSOTest) { @@ -1399,7 +2060,7 @@ TEST(MemorySanitizerOrigins, DISABLED_InitializedStoreDoesNotChangeOrigin) { if (!TrackingOrigins()) return; S s; - u32 origin = rand(); // NOLINT + U4 origin = rand(); // NOLINT s.a = *GetPoisonedO<U2>(0, origin); EXPECT_EQ(origin, __msan_get_origin(&s.a)); EXPECT_EQ(origin, __msan_get_origin(&s.b)); @@ -1413,33 +2074,33 @@ TEST(MemorySanitizerOrigins, DISABLED_InitializedStoreDoesNotChangeOrigin) { template<class T, class BinaryOp> INLINE void BinaryOpOriginTest(BinaryOp op) { - u32 ox = rand(); //NOLINT - u32 oy = rand(); //NOLINT + U4 ox = rand(); //NOLINT + U4 oy = rand(); //NOLINT T *x = GetPoisonedO<T>(0, ox, 0); T *y = GetPoisonedO<T>(1, oy, 0); T *z = GetPoisonedO<T>(2, 0, 0); *z = op(*x, *y); - u32 origin = __msan_get_origin(z); - EXPECT_POISONED_O(v_s8 = *z, origin); + U4 origin = __msan_get_origin(z); + EXPECT_POISONED_O(*z, origin); EXPECT_EQ(true, origin == ox || origin == oy); // y is poisoned, x is not. *x = 10101; *y = *GetPoisonedO<T>(1, oy); - __msan_break_optimization(x); + break_optimization(x); __msan_set_origin(z, sizeof(*z), 0); *z = op(*x, *y); - EXPECT_POISONED_O(v_s8 = *z, oy); + EXPECT_POISONED_O(*z, oy); EXPECT_EQ(__msan_get_origin(z), oy); // x is poisoned, y is not. 
*x = *GetPoisonedO<T>(0, ox); *y = 10101010; - __msan_break_optimization(y); + break_optimization(y); __msan_set_origin(z, sizeof(*z), 0); *z = op(*x, *y); - EXPECT_POISONED_O(v_s8 = *z, ox); + EXPECT_POISONED_O(*z, ox); EXPECT_EQ(__msan_get_origin(z), ox); } @@ -1466,51 +2127,52 @@ TEST(MemorySanitizerOrigins, BinaryOp) { TEST(MemorySanitizerOrigins, Unary) { if (!TrackingOrigins()) return; - EXPECT_POISONED_O(v_s8 = *GetPoisonedO<S8>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s4 = *GetPoisonedO<S8>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s2 = *GetPoisonedO<S8>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s1 = *GetPoisonedO<S8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S8>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s8 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s4 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s2 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s1 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s8 = *GetPoisonedO<U4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s4 = *GetPoisonedO<U4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s2 = *GetPoisonedO<U4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s1 = *GetPoisonedO<U4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<U4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<U4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<U4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<U4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_u8 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_u4 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_u2 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_u1 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_p = (void*)*GetPoisonedO<S8>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_u8 = (U8)*GetPoisonedO<void*>(0, __LINE__), __LINE__); + EXPECT_POISONED_O((void*)*GetPoisonedO<S8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O((U8)*GetPoisonedO<void*>(0, __LINE__), __LINE__); } TEST(MemorySanitizerOrigins, EQ) { if (!TrackingOrigins()) return; - EXPECT_POISONED_O(v_u1 = *GetPoisonedO<S4>(0, __LINE__) <= 11, __LINE__); - EXPECT_POISONED_O(v_u1 = *GetPoisonedO<S4>(0, __LINE__) == 11, __LINE__); - EXPECT_POISONED_O(v_u1 = *GetPoisonedO<float>(0, __LINE__) == 1.1, __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__) <= 11, __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__) == 11, __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<float>(0, __LINE__) == 1.1, __LINE__); } TEST(MemorySanitizerOrigins, DIV) { if (!TrackingOrigins()) return; - EXPECT_POISONED_O(v_u8 = *GetPoisonedO<U8>(0, __LINE__) / 100, __LINE__); - EXPECT_POISONED_O(v_s4 = 100 / *GetPoisonedO<S4>(0, __LINE__, 1), __LINE__); + 
EXPECT_POISONED_O(*GetPoisonedO<U8>(0, __LINE__) / 100, __LINE__); + unsigned o = __LINE__; + EXPECT_UMR_O(volatile unsigned y = 100 / *GetPoisonedO<S4>(0, o, 1), o); } TEST(MemorySanitizerOrigins, SHIFT) { if (!TrackingOrigins()) return; - EXPECT_POISONED_O(v_u8 = *GetPoisonedO<U8>(0, __LINE__) >> 10, __LINE__); - EXPECT_POISONED_O(v_s8 = *GetPoisonedO<S8>(0, __LINE__) >> 10, __LINE__); - EXPECT_POISONED_O(v_s8 = *GetPoisonedO<S8>(0, __LINE__) << 10, __LINE__); - EXPECT_POISONED_O(v_u8 = 10U << *GetPoisonedO<U8>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s8 = -10 >> *GetPoisonedO<S8>(0, __LINE__), __LINE__); - EXPECT_POISONED_O(v_s8 = -10 << *GetPoisonedO<S8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<U8>(0, __LINE__) >> 10, __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S8>(0, __LINE__) >> 10, __LINE__); + EXPECT_POISONED_O(*GetPoisonedO<S8>(0, __LINE__) << 10, __LINE__); + EXPECT_POISONED_O(10U << *GetPoisonedO<U8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(-10 >> *GetPoisonedO<S8>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(-10 << *GetPoisonedO<S8>(0, __LINE__), __LINE__); } template<class T, int N> @@ -1523,16 +2185,16 @@ void MemCpyTest() { __msan_set_origin(x, N * sizeof(T), ox); __msan_set_origin(y, N * sizeof(T), 777777); __msan_set_origin(z, N * sizeof(T), 888888); - v_p = x; - memcpy(y, v_p, N * sizeof(T)); - EXPECT_POISONED_O(v_s1 = y[0], ox); - EXPECT_POISONED_O(v_s1 = y[N/2], ox); - EXPECT_POISONED_O(v_s1 = y[N-1], ox); - v_p = x; - memmove(z, v_p, N * sizeof(T)); - EXPECT_POISONED_O(v_s1 = z[0], ox); - EXPECT_POISONED_O(v_s1 = z[N/2], ox); - EXPECT_POISONED_O(v_s1 = z[N-1], ox); + EXPECT_NOT_POISONED(x); + memcpy(y, x, N * sizeof(T)); + EXPECT_POISONED_O(y[0], ox); + EXPECT_POISONED_O(y[N/2], ox); + EXPECT_POISONED_O(y[N-1], ox); + EXPECT_NOT_POISONED(x); + memmove(z, x, N * sizeof(T)); + EXPECT_POISONED_O(z[0], ox); + EXPECT_POISONED_O(z[N/2], ox); + EXPECT_POISONED_O(z[N-1], ox); } TEST(MemorySanitizerOrigins, LargeMemCpy) { @@ -1550,42 +2212,42 @@ TEST(MemorySanitizerOrigins, SmallMemCpy) { TEST(MemorySanitizerOrigins, Select) { if (!TrackingOrigins()) return; - v_s8 = g_one ? 1 : *GetPoisonedO<S4>(0, __LINE__); - EXPECT_POISONED_O(v_s8 = *GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_NOT_POISONED(g_one ? 1 : *GetPoisonedO<S4>(0, __LINE__)); + EXPECT_POISONED_O(*GetPoisonedO<S4>(0, __LINE__), __LINE__); S4 x; - __msan_break_optimization(&x); + break_optimization(&x); x = g_1 ? *GetPoisonedO<S4>(0, __LINE__) : 0; - EXPECT_POISONED_O(v_s8 = g_1 ? *GetPoisonedO<S4>(0, __LINE__) : 1, __LINE__); - EXPECT_POISONED_O(v_s8 = g_0 ? 1 : *GetPoisonedO<S4>(0, __LINE__), __LINE__); + EXPECT_POISONED_O(g_1 ? *GetPoisonedO<S4>(0, __LINE__) : 1, __LINE__); + EXPECT_POISONED_O(g_0 ? 
1 : *GetPoisonedO<S4>(0, __LINE__), __LINE__); } extern "C" -NOINLINE void AllocaTOTest() { +NOINLINE char AllocaTO() { int ar[100]; - __msan_break_optimization(ar); - v_s8 = ar[10]; + break_optimization(ar); + return ar[10]; // fprintf(stderr, "Descr: %s\n", // __msan_get_origin_descr_if_stack(__msan_get_origin_tls())); } TEST(MemorySanitizerOrigins, Alloca) { if (!TrackingOrigins()) return; - EXPECT_POISONED_S(AllocaTOTest(), "ar@AllocaTOTest"); - EXPECT_POISONED_S(AllocaTOTest(), "ar@AllocaTOTest"); - EXPECT_POISONED_S(AllocaTOTest(), "ar@AllocaTOTest"); - EXPECT_POISONED_S(AllocaTOTest(), "ar@AllocaTOTest"); + EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO"); + EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO"); + EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO"); + EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO"); } // FIXME: replace with a lit-like test. TEST(MemorySanitizerOrigins, DISABLED_AllocaDeath) { if (!TrackingOrigins()) return; - EXPECT_DEATH(AllocaTOTest(), "ORIGIN: stack allocation: ar@AllocaTOTest"); + EXPECT_DEATH(AllocaTO(), "ORIGIN: stack allocation: ar@AllocaTO"); } -NOINLINE int RetvalOriginTest(u32 origin) { +NOINLINE int RetvalOriginTest(U4 origin) { int *a = new int; - __msan_break_optimization(a); + break_optimization(a); __msan_set_origin(a, sizeof(*a), origin); int res = *a; delete a; @@ -1594,18 +2256,18 @@ NOINLINE int RetvalOriginTest(u32 origin) { TEST(MemorySanitizerOrigins, Retval) { if (!TrackingOrigins()) return; - EXPECT_POISONED_O(v_s4 = RetvalOriginTest(__LINE__), __LINE__); + EXPECT_POISONED_O(RetvalOriginTest(__LINE__), __LINE__); } -NOINLINE void ParamOriginTest(int param, u32 origin) { - EXPECT_POISONED_O(v_s4 = param, origin); +NOINLINE void ParamOriginTest(int param, U4 origin) { + EXPECT_POISONED_O(param, origin); } TEST(MemorySanitizerOrigins, Param) { if (!TrackingOrigins()) return; int *a = new int; - u32 origin = __LINE__; - __msan_break_optimization(a); + U4 origin = __LINE__; + break_optimization(a); __msan_set_origin(a, sizeof(*a), origin); ParamOriginTest(*a, origin); delete a; @@ -1614,35 +2276,35 @@ TEST(MemorySanitizerOrigins, Param) { TEST(MemorySanitizerOrigins, Invoke) { if (!TrackingOrigins()) return; StructWithDtor s; // Will cause the calls to become invokes. 
- EXPECT_POISONED_O(v_s4 = RetvalOriginTest(__LINE__), __LINE__); + EXPECT_POISONED_O(RetvalOriginTest(__LINE__), __LINE__); } TEST(MemorySanitizerOrigins, strlen) { S8 alignment; - __msan_break_optimization(&alignment); + break_optimization(&alignment); char x[4] = {'a', 'b', 0, 0}; __msan_poison(&x[2], 1); - u32 origin = __LINE__; + U4 origin = __LINE__; __msan_set_origin(x, sizeof(x), origin); - EXPECT_POISONED_O(v_s4 = strlen(x), origin); + EXPECT_UMR_O(volatile unsigned y = strlen(x), origin); } TEST(MemorySanitizerOrigins, wcslen) { wchar_t w[3] = {'a', 'b', 0}; - u32 origin = __LINE__; + U4 origin = __LINE__; __msan_set_origin(w, sizeof(w), origin); __msan_poison(&w[2], sizeof(wchar_t)); - EXPECT_POISONED_O(v_s4 = wcslen(w), origin); + EXPECT_UMR_O(volatile unsigned y = wcslen(w), origin); } #if MSAN_HAS_M128 TEST(MemorySanitizerOrigins, StoreIntrinsic) { __m128 x, y; - u32 origin = __LINE__; + U4 origin = __LINE__; __msan_set_origin(&x, sizeof(x), origin); __msan_poison(&x, sizeof(x)); __builtin_ia32_storeups((float*)&y, x); - EXPECT_POISONED_O(v_m128 = y, origin); + EXPECT_POISONED_O(y, origin); } #endif @@ -1653,8 +2315,8 @@ NOINLINE void RecursiveMalloc(int depth) { printf("RecursiveMalloc: %d\n", count); int *x1 = new int; int *x2 = new int; - __msan_break_optimization(x1); - __msan_break_optimization(x2); + break_optimization(x1); + break_optimization(x2); if (depth > 0) { RecursiveMalloc(depth-1); RecursiveMalloc(depth-1); @@ -1663,13 +2325,14 @@ NOINLINE void RecursiveMalloc(int depth) { delete x2; } -TEST(MemorySanitizerStress, DISABLED_MallocStackTrace) { - RecursiveMalloc(22); +TEST(MemorySanitizer, CallocOverflow) { + size_t kArraySize = 4096; + volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max(); + volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10; + void *p = calloc(kArraySize, kArraySize2); // Should return 0. + EXPECT_EQ(0L, Ident(p)); } -int main(int argc, char **argv) { - __msan_set_poison_in_malloc(1); - testing::InitGoogleTest(&argc, argv); - int res = RUN_ALL_TESTS(); - return res; +TEST(MemorySanitizerStress, DISABLED_MallocStackTrace) { + RecursiveMalloc(22); } diff --git a/lib/msan/tests/msan_test_config.h b/lib/msan/tests/msan_test_config.h new file mode 100644 index 000000000000..5404c434d09f --- /dev/null +++ b/lib/msan/tests/msan_test_config.h @@ -0,0 +1,20 @@ +//===-- msan_test_config.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of MemorySanitizer. +// +// MemorySanitizer unit tests. +//===----------------------------------------------------------------------===// + +#ifndef MSAN_TEST_CONFIG_H +#define MSAN_TEST_CONFIG_H + +#include "gtest/gtest.h" + +#endif // MSAN_TEST_CONFIG_H diff --git a/lib/msan/tests/msan_test_main.cc b/lib/msan/tests/msan_test_main.cc new file mode 100644 index 000000000000..c8c5fefb19f5 --- /dev/null +++ b/lib/msan/tests/msan_test_main.cc @@ -0,0 +1,21 @@ +//===-- msan_test_main.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of MemorySanitizer. 
+// +//===----------------------------------------------------------------------===// +#ifndef MSAN_EXTERNAL_TEST_CONFIG +#include "msan_test_config.h" +#endif // MSAN_EXTERNAL_TEST_CONFIG + +int main(int argc, char **argv) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/lib/msan/tests/msandr_test_so.cc b/lib/msan/tests/msandr_test_so.cc index fddd8ded8964..eb605d4dba12 100644 --- a/lib/msan/tests/msandr_test_so.cc +++ b/lib/msan/tests/msandr_test_so.cc @@ -34,3 +34,5 @@ int dso_stack_store(void (*fn)(int*, int*), int x) { fn(&x, &y); return y; } + +void break_optimization(void *x) {} diff --git a/lib/msan/tests/msandr_test_so.h b/lib/msan/tests/msandr_test_so.h index 6119542ee50e..cd75ff34f387 100644 --- a/lib/msan/tests/msandr_test_so.h +++ b/lib/msan/tests/msandr_test_so.h @@ -19,5 +19,6 @@ void dso_memfill(char* s, unsigned n); int dso_callfn(int (*fn)(void)); int dso_callfn1(int (*fn)(long long, long long, long long)); //NOLINT int dso_stack_store(void (*fn)(int*, int*), int x); +void break_optimization(void *x); #endif diff --git a/lib/msandr/CMakeLists.txt b/lib/msandr/CMakeLists.txt new file mode 100644 index 000000000000..5a96a9dcc9e9 --- /dev/null +++ b/lib/msandr/CMakeLists.txt @@ -0,0 +1,26 @@ + +if(DynamoRIO_DIR AND DrMemoryFramework_DIR) + set(CMAKE_COMPILER_IS_GNUCC 1) + find_package(DynamoRIO) + find_package(DrMemoryFramework) + + set(arch "x86_64") + add_library(clang_rt.msandr-${arch} SHARED msandr.cc) + configure_DynamoRIO_client(clang_rt.msandr-${arch}) + + function(append_target_cflags tgt cflags) + get_property(old_cflags TARGET clang_rt.msandr-${arch} PROPERTY COMPILE_FLAGS) + set_property(TARGET clang_rt.msandr-${arch} PROPERTY COMPILE_FLAGS "${old_cflags} ${cflags}") + endfunction(append_target_cflags) + + append_target_cflags(clang_rt.msandr-${arch} "-Wno-c++11-extensions") + + use_DynamoRIO_extension(clang_rt.msandr-${arch} drutil) + use_DynamoRIO_extension(clang_rt.msandr-${arch} drmgr) + use_DynamoRIO_extension(clang_rt.msandr-${arch} drsyscall) + + set_target_properties(clang_rt.msandr-${arch} PROPERTIES + LIBRARY_OUTPUT_DIRECTORY ${COMPILER_RT_LIBRARY_OUTPUT_DIR}) + install(TARGETS clang_rt.msandr-${arch} + LIBRARY DESTINATION ${COMPILER_RT_LIBRARY_INSTALL_DIR}) +endif() diff --git a/lib/msandr/README.txt b/lib/msandr/README.txt new file mode 100644 index 000000000000..b328910c53ba --- /dev/null +++ b/lib/msandr/README.txt @@ -0,0 +1,33 @@ +Experimental DynamoRIO-MSAN plugin (codename "MSanDR"). +Supports Linux/x86_64 only. + +Building: + 1. First, download and build DynamoRIO: + (svn co https://dynamorio.googlecode.com/svn/trunk dr && \ + cd dr && mkdir build && cd build && \ + cmake -DDR_EXT_DRMGR_STATIC=ON -DDR_EXT_DRSYMS_STATIC=ON \ + -DDR_EXT_DRUTIL_STATIC=ON -DDR_EXT_DRWRAP_STATIC=ON .. && \ + make -j10 && make install) + + 2. Download and build DrMemory (for DrSyscall extension) + (svn co http://drmemory.googlecode.com/svn/trunk/ drmemory && \ + cd drmemory && mkdir build && cd build && \ + cmake -DDynamoRIO_DIR=`pwd`/../../dr/exports/cmake .. && \ + make -j10 && make install) + + NOTE: The line above will build a shared DrSyscall library in a non-standard + location. This will require the use of LD_LIBRARY_PATH when running MSanDR. + To build a static DrSyscall library (and link it into MSanDR), add + -DDR_EXT_DRSYSCALL_STATIC=ON to the CMake invocation above, but + beware: DrSyscall is LGPL. + + 3. 
Now, build LLVM with two extra CMake flags: + -DDynamoRIO_DIR=<path_to_dynamorio>/exports/cmake + -DDrMemoryFramework_DIR=<path_to_drmemory>/exports64/drmf + + This will build a lib/clang/$VERSION/lib/linux/libclang_rt.msandr-x86_64.so + +Running: + <path_to_dynamorio>/exports/bin64/drrun -c lib/clang/$VERSION/lib/linux/libclang_rt.msandr-x86_64.so -- test_binary + +MSan unit tests contain several tests for MSanDR (use MemorySanitizerDr.* gtest filter). diff --git a/lib/msandr/msandr.cc b/lib/msandr/msandr.cc new file mode 100644 index 000000000000..27b1c9427d9d --- /dev/null +++ b/lib/msandr/msandr.cc @@ -0,0 +1,726 @@ +//===-- msandr.cc ---------------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of MemorySanitizer. +// +// DynamoRio client for MemorySanitizer. +// +// MemorySanitizer requires that all program code is instrumented. Any memory +// store that can turn an uninitialized value into an initialized value must be +// observed by the tool, otherwise we risk reporting a false UMR. +// +// This also includes any libraries that the program depends on. +// +// In the case when rebuilding all program dependencies with MemorySanitizer is +// problematic, an experimental MSanDR tool (the code you are currently looking +// at) can be used. It is a DynamoRio-based tool that uses dynamic +// instrumentation to +// * Unpoison all memory stores. +// * Unpoison TLS slots used by MemorySanitizer to pass function arguments and +// return value shadow on anything that looks like a function call or a return +// from a function. +// +// This tool does not detect the use of uninitialized values in uninstrumented +// libraries. It merely gets rid of false positives by marking all data that +// passes through uninstrumented code as fully initialized. +//===----------------------------------------------------------------------===// + +#include <dr_api.h> +#include <drutil.h> +#include <drmgr.h> +#include <drsyscall.h> + +#include <sys/mman.h> +#include <sys/syscall.h> /* for SYS_mmap */ + +#include <algorithm> +#include <string> +#include <set> +#include <vector> +#include <string.h> + +#define TESTALL(mask, var) (((mask) & (var)) == (mask)) +#define TESTANY(mask, var) (((mask) & (var)) != 0) + +#define CHECK_IMPL(condition, file, line) \ + do { \ + if (!(condition)) { \ + dr_printf("Check failed: `%s`\nat %s:%d\n", #condition, file, line); \ + dr_abort(); \ + } \ + } while (0) // TODO: stacktrace + +#define CHECK(condition) CHECK_IMPL(condition, __FILE__, __LINE__) + +#define VERBOSITY 0 + +namespace { + +class ModuleData { +public: + ModuleData(); + ModuleData(const module_data_t *info); + // Yes, we want default copy, assign, and dtor semantics. + +public: + app_pc start_; + app_pc end_; + // Full path to the module. + std::string path_; + module_handle_t handle_; + bool should_instrument_; + bool executed_; +}; + +std::string g_app_path; + +int msan_retval_tls_offset; +int msan_param_tls_offset; + +// A vector of loaded modules sorted by module bounds. We lookup the current PC +// in here from the bb event. This is better than an rb tree because the lookup +// is faster and the bb event occurs far more than the module load event. 
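// To illustrate the comment above: with g_module_list kept sorted by module
// bounds, the per-bb PC lookup reduces to one binary search plus a bounds
// check. The helper below is only an editorial sketch of that idea, not code
// from this change; LookupModuleByPC and PCBeforeModule are illustrative names
// (it relies only on ModuleData and the <vector>/<algorithm> includes above).
static bool PCBeforeModule(app_pc pc, const ModuleData &m) {
  return pc < m.start_;
}

static ModuleData *LookupModuleByPC(std::vector<ModuleData> &modules,
                                    app_pc pc) {
  // Find the first module that starts beyond pc, then step back one entry.
  std::vector<ModuleData>::iterator it =
      std::upper_bound(modules.begin(), modules.end(), pc, PCBeforeModule);
  if (it == modules.begin())
    return NULL;
  --it;
  // pc belongs to this module only if it also falls below the module's end.
  return pc < it->end_ ? &*it : NULL;
}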
+std::vector<ModuleData> g_module_list; + +ModuleData::ModuleData() + : start_(NULL), end_(NULL), path_(""), handle_(NULL), + should_instrument_(false), executed_(false) { +} + +ModuleData::ModuleData(const module_data_t *info) + : start_(info->start), end_(info->end), path_(info->full_path), + handle_(info->handle), + // We'll check the black/white lists later and adjust this. + should_instrument_(true), executed_(false) { +} + +int(*__msan_get_retval_tls_offset)(); +int(*__msan_get_param_tls_offset)(); +void (*__msan_unpoison)(void *base, size_t size); +bool (*__msan_is_in_loader)(); + +static generic_func_t LookupCallback(module_data_t *app, const char *name) { + generic_func_t callback = dr_get_proc_address(app->handle, name); + if (callback == NULL) { + dr_printf("Couldn't find `%s` in %s\n", name, app->full_path); + CHECK(callback); + } + return callback; +} + +void InitializeMSanCallbacks() { + module_data_t *app = dr_lookup_module_by_name(dr_get_application_name()); + if (!app) { + dr_printf("%s - oops, dr_lookup_module_by_name failed!\n", + dr_get_application_name()); + CHECK(app); + } + g_app_path = app->full_path; + + __msan_get_retval_tls_offset = (int (*)()) + LookupCallback(app, "__msan_get_retval_tls_offset"); + __msan_get_param_tls_offset = (int (*)()) + LookupCallback(app, "__msan_get_param_tls_offset"); + __msan_unpoison = (void(*)(void *, size_t)) + LookupCallback(app, "__msan_unpoison"); + __msan_is_in_loader = (bool (*)()) + LookupCallback(app, "__msan_is_in_loader"); + + dr_free_module_data(app); +} + +// FIXME: Handle absolute addresses and PC-relative addresses. +// FIXME: Handle TLS accesses via FS or GS. DR assumes all other segments have +// a zero base anyway. +bool OperandIsInteresting(opnd_t opnd) { + return (opnd_is_base_disp(opnd) && opnd_get_segment(opnd) != DR_SEG_FS && + opnd_get_segment(opnd) != DR_SEG_GS); +} + +bool WantToInstrument(instr_t *instr) { + // TODO: skip push instructions? + switch (instr_get_opcode(instr)) { + // FIXME: support the instructions excluded below: + case OP_rep_cmps: + // f3 a6 rep cmps %ds:(%rsi) %es:(%rdi) %rsi %rdi %rcx -> %rsi %rdi %rcx + return false; + } + + // Labels appear due to drutil_expand_rep_string() + if (instr_is_label(instr)) + return false; + + CHECK(instr_ok_to_mangle(instr) == true); + + if (instr_writes_memory(instr)) { + for (int d = 0; d < instr_num_dsts(instr); d++) { + opnd_t op = instr_get_dst(instr, d); + if (OperandIsInteresting(op)) + return true; + } + } + + return false; +} + +#define PRE(at, what) instrlist_meta_preinsert(bb, at, INSTR_CREATE_##what); +#define PREF(at, what) instrlist_meta_preinsert(bb, at, what); + +void InstrumentMops(void *drcontext, instrlist_t *bb, instr_t *instr, opnd_t op, + bool is_write) { + bool need_to_restore_eflags = false; + uint flags = instr_get_arith_flags(instr); + // TODO: do something smarter with flags and spills in general? + // For example, spill them only once for a sequence of instrumented + // instructions that don't change/read flags. + + if (!TESTALL(EFLAGS_WRITE_6, flags) || TESTANY(EFLAGS_READ_6, flags)) { + if (VERBOSITY > 1) + dr_printf("Spilling eflags...\n"); + need_to_restore_eflags = true; + // TODO: Maybe sometimes don't need to 'seto'. + // TODO: Maybe sometimes don't want to spill XAX here? + // TODO: No need to spill XAX here if XAX is not used in the BB. 
+ dr_save_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + dr_save_arith_flags_to_xax(drcontext, bb, instr); + dr_save_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_3); + dr_restore_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + } + +#if 0 + dr_printf("==DRMSAN== DEBUG: %d %d %d %d %d %d\n", + opnd_is_memory_reference(op), opnd_is_base_disp(op), + opnd_is_base_disp(op) ? opnd_get_index(op) : -1, + opnd_is_far_memory_reference(op), opnd_is_reg_pointer_sized(op), + opnd_is_base_disp(op) ? opnd_get_disp(op) : -1); +#endif + + reg_id_t R1; + bool address_in_R1 = false; + if (opnd_is_base_disp(op) && opnd_get_index(op) == DR_REG_NULL && + opnd_get_disp(op) == 0) { + // If this is a simple access with no offset or index, we can just use the + // base for R1. + address_in_R1 = true; + R1 = opnd_get_base(op); + } else { + // Otherwise, we need to compute the addr into R1. + // TODO: reuse some spare register? e.g. r15 on x64 + // TODO: might be used as a non-mem-ref register? + R1 = DR_REG_XAX; + } + CHECK(reg_is_pointer_sized(R1)); // otherwise R2 may be wrong. + + // Pick R2 that's not R1 or used by the operand. It's OK if the instr uses + // R2 elsewhere, since we'll restore it before instr. + reg_id_t GPR_TO_USE_FOR_R2[] = { + DR_REG_XAX, DR_REG_XBX, DR_REG_XCX, DR_REG_XDX + // Don't forget to update the +4 below if you add anything else! + }; + std::set<reg_id_t> unused_registers(GPR_TO_USE_FOR_R2, GPR_TO_USE_FOR_R2 + 4); + unused_registers.erase(R1); + for (int j = 0; j < opnd_num_regs_used(op); j++) { + unused_registers.erase(opnd_get_reg_used(op, j)); + } + + CHECK(unused_registers.size() > 0); + reg_id_t R2 = *unused_registers.begin(); + CHECK(R1 != R2); + + // Save the current values of R1 and R2. + dr_save_reg(drcontext, bb, instr, R1, SPILL_SLOT_1); + // TODO: Something smarter than spilling a "fixed" register R2? + dr_save_reg(drcontext, bb, instr, R2, SPILL_SLOT_2); + + if (!address_in_R1) + CHECK(drutil_insert_get_mem_addr(drcontext, bb, instr, op, R1, R2)); + PRE(instr, mov_imm(drcontext, opnd_create_reg(R2), + OPND_CREATE_INT64(0xffffbfffffffffff))); + PRE(instr, and(drcontext, opnd_create_reg(R1), opnd_create_reg(R2))); + // There is no mov_st of a 64-bit immediate, so... + opnd_size_t op_size = opnd_get_size(op); + CHECK(op_size != OPSZ_NA); + uint access_size = opnd_size_in_bytes(op_size); + if (access_size <= 4) { + PRE(instr, + mov_st(drcontext, opnd_create_base_disp(R1, DR_REG_NULL, 0, 0, op_size), + opnd_create_immed_int((ptr_int_t) 0, op_size))); + } else { + // FIXME: tail? + for (uint ofs = 0; ofs < access_size; ofs += 4) { + PRE(instr, + mov_st(drcontext, OPND_CREATE_MEM32(R1, ofs), OPND_CREATE_INT32(0))); + } + } + + // Restore the registers and flags. + dr_restore_reg(drcontext, bb, instr, R1, SPILL_SLOT_1); + dr_restore_reg(drcontext, bb, instr, R2, SPILL_SLOT_2); + + if (need_to_restore_eflags) { + if (VERBOSITY > 1) + dr_printf("Restoring eflags\n"); + // TODO: Check if it's reverse to the dr_restore_reg above and optimize. + dr_save_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + dr_restore_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_3); + dr_restore_arith_flags_from_xax(drcontext, bb, instr); + dr_restore_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + } + + // The original instruction is left untouched. The above instrumentation is just + // a prefix. +} + +void InstrumentReturn(void *drcontext, instrlist_t *bb, instr_t *instr) { + dr_save_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + + // Clobbers nothing except xax. 
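// Aside on the constant used in InstrumentMops above: 0xffffbfffffffffff
// clears bit 46 (0x400000000000) of the application address. That matches
// MSan's linear app-to-shadow mapping on Linux/x86_64, shadow(addr) =
// addr & ~0x400000000000, so zero-filling memory at the masked address
// marks the corresponding application bytes as initialized.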
+ bool res = + dr_insert_get_seg_base(drcontext, bb, instr, DR_SEG_FS, DR_REG_XAX); + CHECK(res); + + // TODO: unpoison more bytes? + PRE(instr, + mov_st(drcontext, OPND_CREATE_MEM64(DR_REG_XAX, msan_retval_tls_offset), + OPND_CREATE_INT32(0))); + + dr_restore_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + + // The original instruction is left untouched. The above instrumentation is just + // a prefix. +} + +void InstrumentIndirectBranch(void *drcontext, instrlist_t *bb, + instr_t *instr) { + dr_save_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + + // Clobbers nothing except xax. + bool res = + dr_insert_get_seg_base(drcontext, bb, instr, DR_SEG_FS, DR_REG_XAX); + CHECK(res); + + // TODO: unpoison more bytes? + for (int i = 0; i < 6; ++i) { + PRE(instr, + mov_st(drcontext, OPND_CREATE_MEMPTR(DR_REG_XAX, msan_param_tls_offset + + i * sizeof(void *)), + OPND_CREATE_INT32(0))); + } + + dr_restore_reg(drcontext, bb, instr, DR_REG_XAX, SPILL_SLOT_1); + + // The original instruction is left untouched. The above instrumentation is just + // a prefix. +} + +// For use with binary search. Modules shouldn't overlap, so we shouldn't have +// to look at end_. If that can happen, we won't support such an application. +bool ModuleDataCompareStart(const ModuleData &left, const ModuleData &right) { + return left.start_ < right.start_; +} + +// Look up the module containing PC. Should be relatively fast, as its called +// for each bb instrumentation. +ModuleData *LookupModuleByPC(app_pc pc) { + ModuleData fake_mod_data; + fake_mod_data.start_ = pc; + std::vector<ModuleData>::iterator it = + lower_bound(g_module_list.begin(), g_module_list.end(), fake_mod_data, + ModuleDataCompareStart); + // if (it == g_module_list.end()) + // return NULL; + if (it == g_module_list.end() || pc < it->start_) + --it; + CHECK(it->start_ <= pc); + if (pc >= it->end_) { + // We're past the end of this module. We shouldn't be in the next module, + // or lower_bound lied to us. + ++it; + CHECK(it == g_module_list.end() || pc < it->start_); + return NULL; + } + + // OK, we found the module. + return &*it; +} + +bool ShouldInstrumentNonModuleCode() { return true; } + +bool ShouldInstrumentModule(ModuleData *mod_data) { + // TODO(rnk): Flags for blacklist would get wired in here. + generic_func_t p = + dr_get_proc_address(mod_data->handle_, "__msan_track_origins"); + return !p; +} + +bool ShouldInstrumentPc(app_pc pc, ModuleData **pmod_data) { + ModuleData *mod_data = LookupModuleByPC(pc); + if (pmod_data) + *pmod_data = mod_data; + if (mod_data != NULL) { + // This module is on a blacklist. + if (!mod_data->should_instrument_) { + return false; + } + } else if (!ShouldInstrumentNonModuleCode()) { + return false; + } + return true; +} + +// TODO(rnk): Make sure we instrument after __msan_init. +dr_emit_flags_t +event_basic_block_app2app(void *drcontext, void *tag, instrlist_t *bb, + bool for_trace, bool translating) { + app_pc pc = dr_fragment_app_pc(tag); + + if (ShouldInstrumentPc(pc, NULL)) + CHECK(drutil_expand_rep_string(drcontext, bb)); + + return DR_EMIT_PERSISTABLE; +} + +dr_emit_flags_t event_basic_block(void *drcontext, void *tag, instrlist_t *bb, + bool for_trace, bool translating) { + app_pc pc = dr_fragment_app_pc(tag); + ModuleData *mod_data; + + if (!ShouldInstrumentPc(pc, &mod_data)) + return DR_EMIT_PERSISTABLE; + + if (VERBOSITY > 1) + dr_printf("============================================================\n"); + if (VERBOSITY > 0) { + std::string mod_path = (mod_data ? 
mod_data->path_ : "<no module, JITed?>"); + if (mod_data && !mod_data->executed_) { + mod_data->executed_ = true; // Nevermind this race. + dr_printf("Executing from new module: %s\n", mod_path.c_str()); + } + dr_printf("BB to be instrumented: %p [from %s]; translating = %s\n", pc, + mod_path.c_str(), translating ? "true" : "false"); + if (mod_data) { + // Match standard sanitizer trace format for free symbols. + // #0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45) + dr_printf(" #0 %p (%s+%p)\n", pc, mod_data->path_.c_str(), + pc - mod_data->start_); + } + } + if (VERBOSITY > 1) { + instrlist_disassemble(drcontext, pc, bb, STDOUT); + instr_t *instr; + for (instr = instrlist_first(bb); instr; instr = instr_get_next(instr)) { + dr_printf("opcode: %d\n", instr_get_opcode(instr)); + } + } + + for (instr_t *i = instrlist_first(bb); i != NULL; i = instr_get_next(i)) { + int opcode = instr_get_opcode(i); + if (opcode == OP_ret || opcode == OP_ret_far) { + InstrumentReturn(drcontext, bb, i); + continue; + } + + // These instructions hopefully cover all cases where control is transferred + // to a function in a different module (we only care about calls into + // compiler-instrumented modules). + // * call_ind is used for normal indirect calls. + // * jmp_ind is used for indirect tail calls, and calls through PLT (PLT + // stub includes a jump to an address from GOT). + if (opcode == OP_call_ind || opcode == OP_call_far_ind || + opcode == OP_jmp_ind || opcode == OP_jmp_far_ind) { + InstrumentIndirectBranch(drcontext, bb, i); + continue; + } + + if (!WantToInstrument(i)) + continue; + + if (VERBOSITY > 1) { + app_pc orig_pc = dr_fragment_app_pc(tag); + uint flags = instr_get_arith_flags(i); + dr_printf("+%d -> to be instrumented! [opcode=%d, flags = 0x%08X]\n", + instr_get_app_pc(i) - orig_pc, instr_get_opcode(i), flags); + } + + if (instr_writes_memory(i)) { + // Instrument memory writes + // bool instrumented_anything = false; + for (int d = 0; d < instr_num_dsts(i); d++) { + opnd_t op = instr_get_dst(i, d); + if (!OperandIsInteresting(op)) + continue; + + // CHECK(!instrumented_anything); + // instrumented_anything = true; + InstrumentMops(drcontext, bb, i, op, true); + break; // only instrumenting the first dst + } + } + } + +// TODO: optimize away redundant restore-spill pairs? + + if (VERBOSITY > 1) { + pc = dr_fragment_app_pc(tag); + dr_printf("\nFinished instrumenting dynamorio_basic_block(PC=" PFX ")\n", pc); + instrlist_disassemble(drcontext, pc, bb, STDOUT); + } + return DR_EMIT_PERSISTABLE; +} + +void event_module_load(void *drcontext, const module_data_t *info, + bool loaded) { + // Insert the module into the list while maintaining the ordering. + ModuleData mod_data(info); + std::vector<ModuleData>::iterator it = + upper_bound(g_module_list.begin(), g_module_list.end(), mod_data, + ModuleDataCompareStart); + it = g_module_list.insert(it, mod_data); + // Check if we should instrument this module. + it->should_instrument_ = ShouldInstrumentModule(&*it); + dr_module_set_should_instrument(info->handle, it->should_instrument_); + + if (VERBOSITY > 0) + dr_printf("==DRMSAN== Loaded module: %s [%p...%p], instrumentation is %s\n", + info->full_path, info->start, info->end, + it->should_instrument_ ? "on" : "off"); +} + +void event_module_unload(void *drcontext, const module_data_t *info) { + if (VERBOSITY > 0) + dr_printf("==DRMSAN== Unloaded module: %s [%p...%p]\n", info->full_path, + info->start, info->end); + + // Remove the module from the list. 
+ ModuleData mod_data(info); + std::vector<ModuleData>::iterator it = + lower_bound(g_module_list.begin(), g_module_list.end(), mod_data, + ModuleDataCompareStart); + // It's a bug if we didn't actually find the module. + CHECK(it != g_module_list.end() && it->start_ == mod_data.start_ && + it->end_ == mod_data.end_ && it->path_ == mod_data.path_); + g_module_list.erase(it); +} + +void event_exit() { + // Clean up so DR doesn't tell us we're leaking memory. + drsys_exit(); + drutil_exit(); + drmgr_exit(); + + if (VERBOSITY > 0) + dr_printf("==DRMSAN== DONE\n"); +} + +bool event_filter_syscall(void *drcontext, int sysnum) { + // FIXME: only intercept syscalls with memory effects. + return true; /* intercept everything */ +} + +bool drsys_iter_memarg_cb(drsys_arg_t *arg, void *user_data) { + CHECK(arg->valid); + + if (arg->pre) + return true; + if (!TESTANY(DRSYS_PARAM_OUT, arg->mode)) + return true; + + size_t sz = arg->size; + + if (sz > 0xFFFFFFFF) { + drmf_status_t res; + drsys_syscall_t *syscall = (drsys_syscall_t *)user_data; + const char *name; + res = drsys_syscall_name(syscall, &name); + CHECK(res == DRMF_SUCCESS); + + dr_printf("SANITY: syscall '%s' arg %d writes %llu bytes memory?!" + " Clipping to %llu.\n", + name, arg->ordinal, (unsigned long long) sz, + (unsigned long long)(sz & 0xFFFFFFFF)); + } + + if (VERBOSITY > 0) { + drmf_status_t res; + drsys_syscall_t *syscall = (drsys_syscall_t *)user_data; + const char *name; + res = drsys_syscall_name(syscall, &name); + dr_printf("drsyscall: syscall '%s' arg %d wrote range [%p, %p)\n", + name, arg->ordinal, arg->start_addr, + (char *)arg->start_addr + sz); + } + + // We don't switch to the app context because __msan_unpoison() doesn't need + // TLS segments. + __msan_unpoison(arg->start_addr, sz); + + return true; /* keep going */ +} + +bool event_pre_syscall(void *drcontext, int sysnum) { + drsys_syscall_t *syscall; + drsys_sysnum_t sysnum_full; + bool known; + drsys_param_type_t ret_type; + drmf_status_t res; + const char *name; + + res = drsys_cur_syscall(drcontext, &syscall); + CHECK(res == DRMF_SUCCESS); + + res = drsys_syscall_number(syscall, &sysnum_full); + CHECK(res == DRMF_SUCCESS); + CHECK(sysnum == sysnum_full.number); + + res = drsys_syscall_is_known(syscall, &known); + CHECK(res == DRMF_SUCCESS); + + res = drsys_syscall_name(syscall, &name); + CHECK(res == DRMF_SUCCESS); + + res = drsys_syscall_return_type(syscall, &ret_type); + CHECK(res == DRMF_SUCCESS); + CHECK(ret_type != DRSYS_TYPE_INVALID); + CHECK(!known || ret_type != DRSYS_TYPE_UNKNOWN); + + res = drsys_iterate_memargs(drcontext, drsys_iter_memarg_cb, NULL); + CHECK(res == DRMF_SUCCESS); + + return true; +} + +static bool IsInLoader(void *drcontext) { + // TODO: This segment swap is inefficient. DR should just let us query the + // app segment base, which it has. Alternatively, if we disable + // -mangle_app_seg, then we won't need the swap. 
+ bool need_swap = !dr_using_app_state(drcontext); + if (need_swap) + dr_switch_to_app_state(drcontext); + bool is_in_loader = __msan_is_in_loader(); + if (need_swap) + dr_switch_to_dr_state(drcontext); + return is_in_loader; +} + +void event_post_syscall(void *drcontext, int sysnum) { + drsys_syscall_t *syscall; + drsys_sysnum_t sysnum_full; + bool success = false; + drmf_status_t res; + + res = drsys_cur_syscall(drcontext, &syscall); + CHECK(res == DRMF_SUCCESS); + + res = drsys_syscall_number(syscall, &sysnum_full); + CHECK(res == DRMF_SUCCESS); + CHECK(sysnum == sysnum_full.number); + + res = drsys_syscall_succeeded(syscall, dr_syscall_get_result(drcontext), + &success); + CHECK(res == DRMF_SUCCESS); + + if (success) { + res = + drsys_iterate_memargs(drcontext, drsys_iter_memarg_cb, (void *)syscall); + CHECK(res == DRMF_SUCCESS); + } + + // Our normal mmap interceptor can't intercept calls from the loader itself. + // This means we don't clear the shadow for calls to dlopen. For now, we + // solve this by intercepting mmap from ld.so here, but ideally we'd have a + // solution that doesn't rely on msandr. + // + // Be careful not to intercept maps done by the msan rtl. Otherwise we end up + // unpoisoning vast regions of memory and OOMing. + // TODO: __msan_unpoison() could "flush" large regions of memory like tsan + // does instead of doing a large memset. However, we need the memory to be + // zeroed, where as tsan does not, so plain madvise is not enough. + if (success && (sysnum == SYS_mmap IF_NOT_X64(|| sysnum == SYS_mmap2))) { + if (IsInLoader(drcontext)) { + app_pc base = (app_pc)dr_syscall_get_result(drcontext); + ptr_uint_t size; + drmf_status_t res = drsys_pre_syscall_arg(drcontext, 1, &size); + CHECK(res == DRMF_SUCCESS); + if (VERBOSITY > 0) + dr_printf("unpoisoning for dlopen: [%p-%p]\n", base, base + size); + // We don't switch to the app context because __msan_unpoison() doesn't + // need TLS segments. + __msan_unpoison(base, size); + } + } +} + +} // namespace + +DR_EXPORT void dr_init(client_id_t id) { + drmf_status_t res; + + drmgr_init(); + drutil_init(); + + std::string app_name = dr_get_application_name(); + // This blacklist will still run these apps through DR's code cache. On the + // other hand, we are able to follow children of these apps. + // FIXME: Once DR has detach, we could just detach here. Alternatively, + // if DR had a fork or exec hook to let us decide there, that would be nice. + // FIXME: make the blacklist cmd-adjustable. + if (app_name == "python" || app_name == "python2.7" || app_name == "bash" || + app_name == "sh" || app_name == "true" || app_name == "exit" || + app_name == "yes" || app_name == "echo") + return; + + drsys_options_t ops; + memset(&ops, 0, sizeof(ops)); + ops.struct_size = sizeof(ops); + ops.analyze_unknown_syscalls = false; + + res = drsys_init(id, &ops); + CHECK(res == DRMF_SUCCESS); + + dr_register_filter_syscall_event(event_filter_syscall); + drmgr_register_pre_syscall_event(event_pre_syscall); + drmgr_register_post_syscall_event(event_post_syscall); + res = drsys_filter_all_syscalls(); + CHECK(res == DRMF_SUCCESS); + + InitializeMSanCallbacks(); + + // FIXME: the shadow is initialized earlier when DR calls one of our wrapper + // functions. This may change one day. + // TODO: make this more robust. 
+ + void *drcontext = dr_get_current_drcontext(); + + dr_switch_to_app_state(drcontext); + msan_retval_tls_offset = __msan_get_retval_tls_offset(); + msan_param_tls_offset = __msan_get_param_tls_offset(); + dr_switch_to_dr_state(drcontext); + if (VERBOSITY > 0) { + dr_printf("__msan_retval_tls offset: %d\n", msan_retval_tls_offset); + dr_printf("__msan_param_tls offset: %d\n", msan_param_tls_offset); + } + + // Standard DR events. + dr_register_exit_event(event_exit); + + drmgr_priority_t priority = { + sizeof(priority), /* size of struct */ + "msandr", /* name of our operation */ + NULL, /* optional name of operation we should precede */ + NULL, /* optional name of operation we should follow */ + 0 + }; /* numeric priority */ + + drmgr_register_bb_app2app_event(event_basic_block_app2app, &priority); + drmgr_register_bb_instru2instru_event(event_basic_block, &priority); + drmgr_register_module_load_event(event_module_load); + drmgr_register_module_unload_event(event_module_unload); + if (VERBOSITY > 0) + dr_printf("==MSANDR== Starting!\n"); +} diff --git a/lib/profile/CMakeLists.txt b/lib/profile/CMakeLists.txt new file mode 100644 index 000000000000..641f085c883f --- /dev/null +++ b/lib/profile/CMakeLists.txt @@ -0,0 +1,16 @@ +set(PROFILE_SOURCES + GCDAProfiling.c) + +filter_available_targets(PROFILE_SUPPORTED_ARCH x86_64 i386) + +if(APPLE) + add_compiler_rt_osx_static_runtime(clang_rt.profile_osx + ARCH ${PROFILE_SUPPORTED_ARCH} + SOURCES ${PROFILE_SOURCES} + CFLAGS --sysroot=${COMPILER_RT_DARWIN_SDK_SYSROOT}) +else() + foreach(arch ${PROFILE_SUPPORTED_ARCH}) + add_compiler_rt_static_runtime(clang_rt.profile-${arch} ${arch} + SOURCES ${PROFILE_SOURCES}) + endforeach() +endif() diff --git a/lib/profile/GCDAProfiling.c b/lib/profile/GCDAProfiling.c index 7c52a1740999..ce1b03c14de7 100644 --- a/lib/profile/GCDAProfiling.c +++ b/lib/profile/GCDAProfiling.c @@ -20,10 +20,12 @@ |* \*===----------------------------------------------------------------------===*/ +#include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> +#include <sys/mman.h> #include <sys/types.h> #ifdef _WIN32 #include <direct.h> @@ -42,17 +44,70 @@ typedef unsigned int uint64_t; * --- GCOV file format I/O primitives --- */ +/* + * The current file we're outputting. + */ static FILE *output_file = NULL; -static void write_int32(uint32_t i) { - fwrite(&i, 4, 1, output_file); +/* + * Buffer that we write things into. + */ +#define WRITE_BUFFER_SIZE (128 * 1024) +static char *write_buffer = NULL; +static uint64_t cur_buffer_size = 0; +static uint64_t cur_pos = 0; +static uint64_t file_size = 0; +static int new_file = 0; +static int fd = -1; + +/* + * A list of functions to write out the data. + */ +typedef void (*writeout_fn)(); + +struct writeout_fn_node { + writeout_fn fn; + struct writeout_fn_node *next; +}; + +static struct writeout_fn_node *writeout_fn_head = NULL; +static struct writeout_fn_node *writeout_fn_tail = NULL; + +/* + * A list of flush functions that our __gcov_flush() function should call. 
+ */ +typedef void (*flush_fn)(); + +struct flush_fn_node { + flush_fn fn; + struct flush_fn_node *next; +}; + +static struct flush_fn_node *flush_fn_head = NULL; +static struct flush_fn_node *flush_fn_tail = NULL; + +static void resize_write_buffer(uint64_t size) { + if (!new_file) return; + size += cur_pos; + if (size <= cur_buffer_size) return; + size = (size - 1) / WRITE_BUFFER_SIZE + 1; + size *= WRITE_BUFFER_SIZE; + write_buffer = realloc(write_buffer, size); + cur_buffer_size = size; +} + +static void write_bytes(const char *s, size_t len) { + resize_write_buffer(len); + memcpy(&write_buffer[cur_pos], s, len); + cur_pos += len; +} + +static void write_32bit_value(uint32_t i) { + write_bytes((char*)&i, 4); } -static void write_int64(uint64_t i) { - uint32_t lo = i >> 0; - uint32_t hi = i >> 32; - write_int32(lo); - write_int32(hi); +static void write_64bit_value(uint64_t i) { + write_bytes((char*)&i, 8); } static uint32_t length_of_string(const char *s) { @@ -61,27 +116,31 @@ static uint32_t length_of_string(const char *s) { static void write_string(const char *s) { uint32_t len = length_of_string(s); - write_int32(len); - fwrite(s, strlen(s), 1, output_file); - fwrite("\0\0\0\0", 4 - (strlen(s) % 4), 1, output_file); + write_32bit_value(len); + write_bytes(s, strlen(s)); + write_bytes("\0\0\0\0", 4 - (strlen(s) % 4)); } -static uint32_t read_int32() { - uint32_t tmp; +static uint32_t read_32bit_value() { + uint32_t val; - if (fread(&tmp, 1, 4, output_file) != 4) + if (new_file) return (uint32_t)-1; - return tmp; + val = *(uint32_t*)&write_buffer[cur_pos]; + cur_pos += 4; + return val; } -static uint64_t read_int64() { - uint64_t tmp; +static uint64_t read_64bit_value() { + uint64_t val; - if (fread(&tmp, 1, 8, output_file) != 8) + if (new_file) return (uint64_t)-1; - return tmp; + val = *(uint64_t*)&write_buffer[cur_pos]; + cur_pos += 8; + return val; } static char *mangle_filename(const char *orig_filename) { @@ -91,13 +150,13 @@ static char *mangle_filename(const char *orig_filename) { int level = 0; const char *fname = orig_filename, *ptr = NULL; const char *prefix = getenv("GCOV_PREFIX"); - const char *tmp = getenv("GCOV_PREFIX_STRIP"); + const char *prefix_strip_str = getenv("GCOV_PREFIX_STRIP"); if (!prefix) return strdup(orig_filename); - if (tmp) { - prefix_strip = atoi(tmp); + if (prefix_strip_str) { + prefix_strip = atoi(prefix_strip_str); /* Negative GCOV_PREFIX_STRIP values are ignored */ if (prefix_strip < 0) @@ -137,6 +196,21 @@ static void recursive_mkdir(char *filename) { } } +static void map_file() { + fseek(output_file, 0L, SEEK_END); + file_size = ftell(output_file); + + write_buffer = mmap(0, file_size, PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, fd, 0); +} + +static void unmap_file() { + msync(write_buffer, file_size, MS_SYNC); + munmap(write_buffer, file_size); + write_buffer = NULL; + file_size = 0; +} + /* * --- LLVM line counter API --- */ @@ -145,19 +219,23 @@ static void recursive_mkdir(char *filename) { * profiling enabled will emit to a different file. Only one file may be * started at a time. */ -void llvm_gcda_start_file(const char *orig_filename) { +void llvm_gcda_start_file(const char *orig_filename, const char version[4]) { char *filename = mangle_filename(orig_filename); + const char *mode = "r+b"; /* Try just opening the file. */ - output_file = fopen(filename, "r+b"); + new_file = 0; + fd = open(filename, O_RDWR); - if (!output_file) { + if (fd == -1) { /* Try opening the file, creating it if necessary. 
*/ - output_file = fopen(filename, "w+b"); - if (!output_file) { + new_file = 1; + mode = "w+b"; + fd = open(filename, O_RDWR | O_CREAT, 0644); + if (fd == -1) { /* Try creating the directories first then opening the file. */ recursive_mkdir(filename); - output_file = fopen(filename, "w+b"); + fd = open(filename, O_RDWR | O_CREAT, 0644); if (!output_file) { /* Bah! It's hopeless. */ fprintf(stderr, "profiling:%s: cannot open\n", filename); @@ -167,12 +245,24 @@ void llvm_gcda_start_file(const char *orig_filename) { } } - /* gcda file, version 404*, stamp LLVM. */ -#ifdef __APPLE__ - fwrite("adcg*204MVLL", 12, 1, output_file); -#else - fwrite("adcg*404MVLL", 12, 1, output_file); -#endif + output_file = fdopen(fd, mode); + + /* Initialize the write buffer. */ + write_buffer = NULL; + cur_buffer_size = 0; + cur_pos = 0; + + if (new_file) { + resize_write_buffer(WRITE_BUFFER_SIZE); + memset(write_buffer, 0, WRITE_BUFFER_SIZE); + } else { + map_file(); + } + + /* gcda file, version, stamp LLVM. */ + write_bytes("adcg", 4); + write_bytes(version, 4); + write_bytes("MVLL", 4); free(filename); @@ -206,61 +296,68 @@ void llvm_gcda_increment_indirect_counter(uint32_t *predecessor, #endif } -void llvm_gcda_emit_function(uint32_t ident, const char *function_name) { +void llvm_gcda_emit_function(uint32_t ident, const char *function_name, + uint8_t use_extra_checksum) { + uint32_t len = 2; + + if (use_extra_checksum) + len++; #ifdef DEBUG_GCDAPROFILING - fprintf(stderr, "llvmgcda: function id=0x%08x\n", ident); + fprintf(stderr, "llvmgcda: function id=0x%08x name=%s\n", ident, + function_name ? function_name : "NULL"); #endif if (!output_file) return; - /* function tag */ - fwrite("\0\0\0\1", 4, 1, output_file); - write_int32(3 + 1 + length_of_string(function_name)); - write_int32(ident); - write_int32(0); - write_int32(0); - write_string(function_name); + /* function tag */ + write_bytes("\0\0\0\1", 4); + if (function_name) + len += 1 + length_of_string(function_name); + write_32bit_value(len); + write_32bit_value(ident); + write_32bit_value(0); + if (use_extra_checksum) + write_32bit_value(0); + if (function_name) + write_string(function_name); } void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { uint32_t i; uint64_t *old_ctrs = NULL; uint32_t val = 0; - long pos = 0; + uint64_t save_cur_pos = cur_pos; if (!output_file) return; - pos = ftell(output_file); - val = read_int32(); + val = read_32bit_value(); if (val != (uint32_t)-1) { /* There are counters present in the file. Merge them. */ - uint32_t j; - if (val != 0x01a10000) { - fprintf(stderr, "profiling: invalid magic number (0x%08x)\n", val); + fprintf(stderr, "profiling:invalid magic number (0x%08x)\n", val); return; } - val = read_int32(); + val = read_32bit_value(); if (val == (uint32_t)-1 || val / 2 != num_counters) { - fprintf(stderr, "profiling: invalid number of counters (%d)\n", val); + fprintf(stderr, "profiling:invalid number of counters (%d)\n", val); return; } old_ctrs = malloc(sizeof(uint64_t) * num_counters); - - for (j = 0; j < num_counters; ++j) - old_ctrs[j] = read_int64(); + for (i = 0; i < num_counters; ++i) + old_ctrs[i] = read_64bit_value(); } - /* Reset for writing. */ - fseek(output_file, pos, SEEK_SET); + cur_pos = save_cur_pos; /* Counter #1 (arcs) tag */ - fwrite("\0\0\xa1\1", 4, 1, output_file); - write_int32(num_counters * 2); - for (i = 0; i < num_counters; ++i) - write_int64(counters[i] + (old_ctrs ? 
old_ctrs[i] : 0)); + write_bytes("\0\0\xa1\1", 4); + write_32bit_value(num_counters * 2); + for (i = 0; i < num_counters; ++i) { + counters[i] += (old_ctrs ? old_ctrs[i] : 0); + write_64bit_value(counters[i]); + } free(old_ctrs); @@ -274,11 +371,103 @@ void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { void llvm_gcda_end_file() { /* Write out EOF record. */ if (!output_file) return; - fwrite("\0\0\0\0\0\0\0\0", 8, 1, output_file); + write_bytes("\0\0\0\0\0\0\0\0", 8); + + if (new_file) { + fwrite(write_buffer, cur_pos, 1, output_file); + free(write_buffer); + } else { + unmap_file(); + } + fclose(output_file); output_file = NULL; + write_buffer = NULL; #ifdef DEBUG_GCDAPROFILING fprintf(stderr, "llvmgcda: -----\n"); #endif } + +void llvm_register_writeout_function(writeout_fn fn) { + struct writeout_fn_node *new_node = malloc(sizeof(struct writeout_fn_node)); + new_node->fn = fn; + new_node->next = NULL; + + if (!writeout_fn_head) { + writeout_fn_head = writeout_fn_tail = new_node; + } else { + writeout_fn_tail->next = new_node; + writeout_fn_tail = new_node; + } +} + +void llvm_writeout_files() { + struct writeout_fn_node *curr = writeout_fn_head; + + while (curr) { + curr->fn(); + curr = curr->next; + } +} + +void llvm_delete_writeout_function_list() { + while (writeout_fn_head) { + struct writeout_fn_node *node = writeout_fn_head; + writeout_fn_head = writeout_fn_head->next; + free(node); + } + + writeout_fn_head = writeout_fn_tail = NULL; +} + +void llvm_register_flush_function(flush_fn fn) { + struct flush_fn_node *new_node = malloc(sizeof(struct flush_fn_node)); + new_node->fn = fn; + new_node->next = NULL; + + if (!flush_fn_head) { + flush_fn_head = flush_fn_tail = new_node; + } else { + flush_fn_tail->next = new_node; + flush_fn_tail = new_node; + } +} + +void __gcov_flush() { + struct flush_fn_node *curr = flush_fn_head; + + while (curr) { + curr->fn(); + curr = curr->next; + } +} + +void llvm_delete_flush_function_list() { + while (flush_fn_head) { + struct flush_fn_node *node = flush_fn_head; + flush_fn_head = flush_fn_head->next; + free(node); + } + + flush_fn_head = flush_fn_tail = NULL; +} + +void llvm_gcov_init(writeout_fn wfn, flush_fn ffn) { + static int atexit_ran = 0; + + if (wfn) + llvm_register_writeout_function(wfn); + + if (ffn) + llvm_register_flush_function(ffn); + + if (atexit_ran == 0) { + atexit_ran = 1; + + /* Make sure we write out the data and delete the data structures. */ + atexit(llvm_delete_flush_function_list); + atexit(llvm_delete_writeout_function_list); + atexit(llvm_writeout_files); + } +} diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt index ee0e1237c1a9..2683a37a32ca 100644 --- a/lib/sanitizer_common/CMakeLists.txt +++ b/lib/sanitizer_common/CMakeLists.txt @@ -8,18 +8,27 @@ set(SANITIZER_SOURCES sanitizer_libc.cc sanitizer_linux.cc sanitizer_mac.cc + sanitizer_platform_limits_posix.cc sanitizer_posix.cc sanitizer_printf.cc sanitizer_stackdepot.cc sanitizer_stacktrace.cc - sanitizer_symbolizer.cc sanitizer_symbolizer_itanium.cc - sanitizer_symbolizer_linux.cc sanitizer_symbolizer_mac.cc sanitizer_symbolizer_win.cc + sanitizer_thread_registry.cc sanitizer_win.cc ) +set(SANITIZER_LIBCDEP_SOURCES + sanitizer_common_libcdep.cc + sanitizer_linux_libcdep.cc + sanitizer_posix_libcdep.cc + sanitizer_stoptheworld_linux_libcdep.cc + sanitizer_symbolizer_libcdep.cc + sanitizer_symbolizer_linux_libcdep.cc + ) + # Explicitly list all sanitizer_common headers. 
Not all of these are # included in sanitizer_common source files, but we need to depend on # headers when building our custom unit tests. @@ -31,10 +40,12 @@ set(SANITIZER_HEADERS sanitizer_common.h sanitizer_common_interceptors.inc sanitizer_common_interceptors_scanf.inc + sanitizer_common_syscalls.inc sanitizer_flags.h sanitizer_internal_defs.h sanitizer_lfstack.h sanitizer_libc.h + sanitizer_linux.h sanitizer_list.h sanitizer_mutex.h sanitizer_placement_new.h @@ -45,20 +56,24 @@ set(SANITIZER_HEADERS sanitizer_stackdepot.h sanitizer_stacktrace.h sanitizer_symbolizer.h + sanitizer_thread_registry.h ) -set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS}) +set(SANITIZER_CFLAGS + ${SANITIZER_COMMON_CFLAGS} + -fno-rtti) set(SANITIZER_RUNTIME_LIBRARIES) if(APPLE) # Build universal binary on APPLE. - add_library(RTSanitizerCommon.osx OBJECT ${SANITIZER_SOURCES}) - set_target_compile_flags(RTSanitizerCommon.osx ${SANITIZER_CFLAGS}) - set_target_properties(RTSanitizerCommon.osx PROPERTIES - OSX_ARCHITECTURES "${SANITIZER_COMMON_SUPPORTED_ARCH}") + add_compiler_rt_osx_object_library(RTSanitizerCommon + ARCH ${SANITIZER_COMMON_SUPPORTED_ARCH} + SOURCES ${SANITIZER_SOURCES} ${SANITIZER_LIBCDEP_SOURCES} + CFLAGS ${SANITIZER_CFLAGS}) list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.osx) elseif(ANDROID) - add_library(RTSanitizerCommon.arm.android OBJECT ${SANITIZER_SOURCES}) + add_library(RTSanitizerCommon.arm.android OBJECT + ${SANITIZER_SOURCES} ${SANITIZER_LIBCDEP_SOURCES}) set_target_compile_flags(RTSanitizerCommon.arm.android ${SANITIZER_CFLAGS}) list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.arm.android) @@ -67,6 +82,12 @@ else() foreach(arch ${SANITIZER_COMMON_SUPPORTED_ARCH}) add_compiler_rt_object_library(RTSanitizerCommon ${arch} SOURCES ${SANITIZER_SOURCES} CFLAGS ${SANITIZER_CFLAGS}) + add_compiler_rt_object_library(RTSanitizerCommonLibc ${arch} + SOURCES ${SANITIZER_LIBCDEP_SOURCES} CFLAGS ${SANITIZER_CFLAGS}) + add_compiler_rt_static_runtime(clang_rt.san-${arch} ${arch} + SOURCES $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + CFLAGS ${SANITIZER_CFLAGS}) list(APPEND SANITIZER_RUNTIME_LIBRARIES RTSanitizerCommon.${arch}) endforeach() endif() diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc index b13a7c6c14c0..a97a70937a43 100644 --- a/lib/sanitizer_common/sanitizer_allocator.cc +++ b/lib/sanitizer_common/sanitizer_allocator.cc @@ -15,16 +15,16 @@ // FIXME: We should probably use more low-level allocator that would // mmap some pages and split them into chunks to fulfill requests. 
-#if defined(__linux__) && !defined(__ANDROID__) +#if SANITIZER_LINUX && !SANITIZER_ANDROID extern "C" void *__libc_malloc(__sanitizer::uptr size); extern "C" void __libc_free(void *ptr); # define LIBC_MALLOC __libc_malloc # define LIBC_FREE __libc_free -#else // __linux__ && !ANDROID +#else // SANITIZER_LINUX && !SANITIZER_ANDROID # include <stdlib.h> # define LIBC_MALLOC malloc # define LIBC_FREE free -#endif // __linux__ && !ANDROID +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID namespace __sanitizer { @@ -75,4 +75,10 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) { low_level_alloc_callback = callback; } +bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) { + if (!size) return false; + uptr max = (uptr)-1L; + return (max / size) < n; +} + } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h index ad89c3c870dc..0542addb7f37 100644 --- a/lib/sanitizer_common/sanitizer_allocator.h +++ b/lib/sanitizer_common/sanitizer_allocator.h @@ -25,16 +25,16 @@ namespace __sanitizer { // SizeClassMap maps allocation sizes into size classes and back. // Class 0 corresponds to size 0. -// Classes 1 - 16 correspond to sizes 8 - 128 (size = class_id * 8). -// Next 8 classes: 128 + i * 16 (i = 1 to 8). -// Next 8 classes: 256 + i * 32 (i = 1 to 8). +// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16). +// Next 4 classes: 256 + i * 64 (i = 1 to 4). +// Next 4 classes: 512 + i * 128 (i = 1 to 4). // ... -// Next 8 classes: 2^k + i * 2^(k-3) (i = 1 to 8). +// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4). // Last class corresponds to kMaxSize = 1 << kMaxSizeLog. // // This structure of the size class map gives us: // - Efficient table-free class-to-size and size-to-class functions. -// - Difference between two consequent size classes is betweed 12% and 6% +// - Difference between two consequent size classes is betweed 14% and 25% // // This class also gives a hint to a thread-caching allocator about the amount // of chunks that need to be cached per-thread: @@ -42,50 +42,70 @@ namespace __sanitizer { // - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class. // // Part of output of SizeClassMap::Print(): -// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0 -// c01 => s: 8 diff: +8 00% l 3 cached: 256 2048; id 1 -// c02 => s: 16 diff: +8 100% l 4 cached: 256 4096; id 2 -// ... -// c07 => s: 56 diff: +8 16% l 5 cached: 256 14336; id 7 +// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0 +// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1 +// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2 +// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3 +// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4 +// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5 +// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6 +// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7 // -// c08 => s: 64 diff: +8 14% l 6 cached: 256 16384; id 8 -// ... 
-// c15 => s: 120 diff: +8 07% l 6 cached: 256 30720; id 15 +// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8 +// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9 +// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10 +// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11 +// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12 +// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13 +// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14 +// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15 // -// c16 => s: 128 diff: +8 06% l 7 cached: 256 32768; id 16 -// c17 => s: 144 diff: +16 12% l 7 cached: 227 32688; id 17 -// ... -// c23 => s: 240 diff: +16 07% l 7 cached: 136 32640; id 23 +// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16 +// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17 +// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18 +// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19 // -// c24 => s: 256 diff: +16 06% l 8 cached: 128 32768; id 24 -// c25 => s: 288 diff: +32 12% l 8 cached: 113 32544; id 25 -// ... -// c31 => s: 480 diff: +32 07% l 8 cached: 68 32640; id 31 +// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20 +// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21 +// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22 +// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23 // -// c32 => s: 512 diff: +32 06% l 9 cached: 64 32768; id 32 - +// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24 +// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25 +// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26 +// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27 +// +// ... +// +// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48 +// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49 +// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50 +// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51 +// +// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52 -template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog, - uptr kMinBatchClassT> +template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog> class SizeClassMap { - static const uptr kMinSizeLog = 3; + static const uptr kMinSizeLog = 4; static const uptr kMidSizeLog = kMinSizeLog + 4; static const uptr kMinSize = 1 << kMinSizeLog; static const uptr kMidSize = 1 << kMidSizeLog; static const uptr kMidClass = kMidSize / kMinSize; - static const uptr S = 3; + static const uptr S = 2; static const uptr M = (1 << S) - 1; public: static const uptr kMaxNumCached = kMaxNumCachedT; + // We transfer chunks between central and thread-local free lists in batches. + // For small size classes we allocate batches separately. + // For large size classes we use one of the chunks to store the batch. 
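// Rough numbers for the in-chunk batch optimization, assuming 64-bit uptr and
// the DefaultSizeClassMap parameters defined below (kMaxNumCached = 128,
// kMaxBytesCachedLog = 16): sizeof(TransferBatch) is 8 + 8 + 128*8 = 1040
// bytes, but a batch for class_id only uses MaxCached(class_id) of its 128
// slots, i.e. 1040 - 8*(128 - MaxCached(class_id)) bytes. For the 256-byte
// class MaxCached is 128, so the batch (1040 bytes) cannot fit in a chunk and
// is allocated separately; for the 1024-byte class MaxCached is 65536/1024 =
// 64, so the batch (528 bytes) fits inside one of the chunks. This is the
// test SizeClassRequiresSeparateTransferBatch() performs further down.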
struct TransferBatch { TransferBatch *next; uptr count; void *batch[kMaxNumCached]; }; - static const uptr kMinBatchClass = kMinBatchClassT; - static const uptr kMaxSize = 1 << kMaxSizeLog; + static const uptr kMaxSize = 1UL << kMaxSizeLog; static const uptr kNumClasses = kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1; COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256); @@ -106,7 +126,7 @@ class SizeClassMap { if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog; if (size > kMaxSize) return 0; - uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size); + uptr l = MostSignificantSetBitIndex(size); uptr hbits = (size >> (l - S)) & M; uptr lbits = size & ((1 << (l - S)) - 1); uptr l1 = l - kMidSizeLog; @@ -116,7 +136,7 @@ class SizeClassMap { static uptr MaxCached(uptr class_id) { if (class_id == 0) return 0; uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id); - return Max(1UL, Min(kMaxNumCached, n)); + return Max<uptr>(1, Min(kMaxNumCached, n)); } static void Print() { @@ -128,7 +148,7 @@ class SizeClassMap { Printf("\n"); uptr d = s - prev_s; uptr p = prev_s ? (d * 100 / prev_s) : 0; - uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(s); + uptr l = s ? MostSignificantSetBitIndex(s) : 0; uptr cached = MaxCached(i) * s; Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd " "cached: %zd %zd; id %zd\n", @@ -139,10 +159,16 @@ class SizeClassMap { Printf("Total cached: %zd\n", total_cached); } + static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) { + return Size(class_id) < sizeof(TransferBatch) - + sizeof(uptr) * (kMaxNumCached - MaxCached(class_id)); + } + static void Validate() { for (uptr c = 1; c < kNumClasses; c++) { // Printf("Validate: c%zd\n", c); uptr s = Size(c); + CHECK_NE(s, 0U); CHECK_EQ(ClassID(s), c); if (c != kNumClasses - 1) CHECK_EQ(ClassID(s + 1), c + 1); @@ -160,26 +186,93 @@ class SizeClassMap { if (c > 0) CHECK_LT(Size(c-1), s); } - - // TransferBatch for kMinBatchClass must fit into the block itself. - const uptr batch_size = sizeof(TransferBatch) - - sizeof(void*) // NOLINT - * (kMaxNumCached - MaxCached(kMinBatchClass)); - CHECK_LE(batch_size, Size(kMinBatchClass)); - // TransferBatch for kMinBatchClass-1 must not fit into the block itself. - const uptr batch_size1 = sizeof(TransferBatch) - - sizeof(void*) // NOLINT - * (kMaxNumCached - MaxCached(kMinBatchClass - 1)); - CHECK_GT(batch_size1, Size(kMinBatchClass - 1)); } }; -typedef SizeClassMap<17, 256, 16, FIRST_32_SECOND_64(33, 36)> - DefaultSizeClassMap; -typedef SizeClassMap<17, 64, 14, FIRST_32_SECOND_64(25, 28)> - CompactSizeClassMap; +typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap; +typedef SizeClassMap<17, 64, 14> CompactSizeClassMap; template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache; +// Memory allocator statistics +enum AllocatorStat { + AllocatorStatMalloced, + AllocatorStatFreed, + AllocatorStatMmapped, + AllocatorStatUnmapped, + AllocatorStatCount +}; + +typedef u64 AllocatorStatCounters[AllocatorStatCount]; + +// Per-thread stats, live in per-thread cache. 
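// A minimal sketch of how the two stats classes below are meant to be wired
// together (inferred from their Register/Unregister/Get methods, not spelled
// out elsewhere in this change): each per-thread cache owns an
// AllocatorStats, registers it with one shared AllocatorGlobalStats, and the
// global object's Get() walks the resulting ring to sum every thread's
// counters:
//   AllocatorGlobalStats global; AllocatorStats thread;
//   global.Init(); thread.Init(); global.Register(&thread);
//   thread.Add(AllocatorStatMalloced, 4096);
//   AllocatorStatCounters totals; global.Get(totals);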
+class AllocatorStats { + public: + void Init() { + internal_memset(this, 0, sizeof(*this)); + } + + void Add(AllocatorStat i, u64 v) { + v += atomic_load(&stats_[i], memory_order_relaxed); + atomic_store(&stats_[i], v, memory_order_relaxed); + } + + void Set(AllocatorStat i, u64 v) { + atomic_store(&stats_[i], v, memory_order_relaxed); + } + + u64 Get(AllocatorStat i) const { + return atomic_load(&stats_[i], memory_order_relaxed); + } + + private: + friend class AllocatorGlobalStats; + AllocatorStats *next_; + AllocatorStats *prev_; + atomic_uint64_t stats_[AllocatorStatCount]; +}; + +// Global stats, used for aggregation and querying. +class AllocatorGlobalStats : public AllocatorStats { + public: + void Init() { + internal_memset(this, 0, sizeof(*this)); + next_ = this; + prev_ = this; + } + + void Register(AllocatorStats *s) { + SpinMutexLock l(&mu_); + s->next_ = next_; + s->prev_ = this; + next_->prev_ = s; + next_ = s; + } + + void Unregister(AllocatorStats *s) { + SpinMutexLock l(&mu_); + s->prev_->next_ = s->next_; + s->next_->prev_ = s->prev_; + for (int i = 0; i < AllocatorStatCount; i++) + Add(AllocatorStat(i), s->Get(AllocatorStat(i))); + } + + void Get(AllocatorStatCounters s) const { + internal_memset(s, 0, AllocatorStatCount * sizeof(u64)); + SpinMutexLock l(&mu_); + const AllocatorStats *stats = this; + for (;;) { + for (int i = 0; i < AllocatorStatCount; i++) + s[i] += stats->Get(AllocatorStat(i)); + stats = stats->next_; + if (stats == this) + break; + } + } + + private: + mutable SpinMutex mu_; +}; + // Allocators call these callbacks on mmap/munmap. struct NoOpMapUnmapCallback { void OnMap(uptr p, uptr size) const { } @@ -233,18 +326,20 @@ class SizeClassAllocator64 { alignment <= SizeClassMap::kMaxSize; } - Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) { + NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c, + uptr class_id) { CHECK_LT(class_id, kNumClasses); RegionInfo *region = GetRegionInfo(class_id); Batch *b = region->free_list.Pop(); if (b == 0) - b = PopulateFreeList(c, class_id, region); + b = PopulateFreeList(stat, c, class_id, region); region->n_allocated += b->count; return b; } - void NOINLINE DeallocateBatch(uptr class_id, Batch *b) { + NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) { RegionInfo *region = GetRegionInfo(class_id); + CHECK_GT(b->count, 0); region->free_list.Push(b); region->n_freed += b->count; } @@ -260,10 +355,12 @@ class SizeClassAllocator64 { void *GetBlockBegin(void *p) { uptr class_id = GetSizeClass(p); uptr size = SizeClassMap::Size(class_id); + if (!size) return 0; uptr chunk_idx = GetChunkIdx((uptr)p, size); uptr reg_beg = (uptr)p & ~(kRegionSize - 1); uptr beg = chunk_idx * size; uptr next_beg = beg + size; + if (class_id >= kNumClasses) return 0; RegionInfo *region = GetRegionInfo(class_id); if (region->mapped_user >= next_beg) return reinterpret_cast<void*>(reg_beg + beg); @@ -322,6 +419,38 @@ class SizeClassAllocator64 { } } + // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone + // introspection API. + void ForceLock() { + for (uptr i = 0; i < kNumClasses; i++) { + GetRegionInfo(i)->mutex.Lock(); + } + } + + void ForceUnlock() { + for (int i = (int)kNumClasses - 1; i >= 0; i--) { + GetRegionInfo(i)->mutex.Unlock(); + } + } + + // Iterate over existing chunks. May include chunks that are not currently + // allocated to the user (e.g. freed). + // The caller is expected to call ForceLock() before calling this function. 
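// Illustrative call sequence for the lock/iterate pair described above (the
// allocator instance and callback names are hypothetical):
//   a64.ForceLock();
//   a64.ForEachChunk(MyChunkVisitor());  // functor taking a void *chunk
//   a64.ForceUnlock();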
+ template<typename Callable> + void ForEachChunk(const Callable &callback) { + for (uptr class_id = 1; class_id < kNumClasses; class_id++) { + RegionInfo *region = GetRegionInfo(class_id); + uptr chunk_size = SizeClassMap::Size(class_id); + uptr region_beg = kSpaceBeg + class_id * kRegionSize; + for (uptr p = region_beg; + p < region_beg + region->allocated_user; + p += chunk_size) { + // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p)); + callback((void *)p); + } + } + } + typedef SizeClassMap SizeClassMapT; static const uptr kNumClasses = SizeClassMap::kNumClasses; static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded; @@ -336,7 +465,7 @@ class SizeClassAllocator64 { // or with one element if its size is greater. static const uptr kPopulateSize = 1 << 14; // Call mmap for user memory with at least this size. - static const uptr kUserMapSize = 1 << 15; + static const uptr kUserMapSize = 1 << 16; // Call mmap for metadata memory with at least this size. static const uptr kMetaMapSize = 1 << 16; @@ -363,15 +492,16 @@ class SizeClassAllocator64 { } static uptr GetChunkIdx(uptr chunk, uptr size) { - u32 offset = chunk % kRegionSize; + uptr offset = chunk % kRegionSize; // Here we divide by a non-constant. This is costly. - // We require that kRegionSize is at least 2^32 so that offset is 32-bit. - // We save 2x by using 32-bit div, but may need to use a 256-way switch. - return offset / (u32)size; + // size always fits into 32-bits. If the offset fits too, use 32-bit div. + if (offset >> (SANITIZER_WORDSIZE / 2)) + return offset / size; + return (u32)offset / (u32)size; } - Batch *NOINLINE PopulateFreeList(AllocatorCache *c, uptr class_id, - RegionInfo *region) { + NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c, + uptr class_id, RegionInfo *region) { BlockingMutexLock l(®ion->mutex); Batch *b = region->free_list.Pop(); if (b) @@ -388,6 +518,7 @@ class SizeClassAllocator64 { map_size += kUserMapSize; CHECK_GE(region->mapped_user + map_size, end_idx); MapWithCallback(region_beg + region->mapped_user, map_size); + stat->Add(AllocatorStatMmapped, map_size); region->mapped_user += map_size; } uptr total_count = (region->mapped_user - beg_idx - size) @@ -404,14 +535,14 @@ class SizeClassAllocator64 { region->mapped_meta += map_size; } CHECK_LE(region->allocated_meta, region->mapped_meta); - if (region->allocated_user + region->allocated_meta > kRegionSize) { - Printf("Out of memory. Dying.\n"); + if (region->mapped_user + region->mapped_meta > kRegionSize) { + Printf("%s: Out of memory. Dying. ", SanitizerToolName); Printf("The process has exhausted %zuMB for size class %zu.\n", kRegionSize / 1024 / 1024, size); Die(); } for (;;) { - if (class_id < SizeClassMap::kMinBatchClass) + if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch))); else b = (Batch*)(region_beg + beg_idx); @@ -423,12 +554,37 @@ class SizeClassAllocator64 { beg_idx += count * size; if (beg_idx + count * size + size > region->mapped_user) break; + CHECK_GT(b->count, 0); region->free_list.Push(b); } return b; } }; +// Maps integers in rage [0, kSize) to u8 values. +template<u64 kSize> +class FlatByteMap { + public: + void TestOnlyInit() { + internal_memset(map_, 0, sizeof(map_)); + } + + void set(uptr idx, u8 val) { + CHECK_LT(idx, kSize); + CHECK_EQ(0U, map_[idx]); + map_[idx] = val; + } + u8 operator[] (uptr idx) { + CHECK_LT(idx, kSize); + // FIXME: CHECK may be too expensive here. 
+ return map_[idx]; + } + private: + u8 map_[kSize]; +}; + +// FIXME: Also implement TwoLevelByteMap. + // SizeClassAllocator32 -- allocator for 32-bit address space. // This allocator can theoretically be used on 64-bit arch, but there it is less // efficient than SizeClassAllocator64. @@ -440,7 +596,7 @@ class SizeClassAllocator64 { // a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize). // Since the regions are aligned by kRegionSize, there are exactly // kNumPossibleRegions possible regions in the address space and so we keep -// an u8 array possible_regions[kNumPossibleRegions] to store the size classes. +// a ByteMap possible_regions to store the size classes of each Region. // 0 size class means the region is not used by the allocator. // // One Region is used to allocate chunks of a single size class. @@ -451,16 +607,19 @@ class SizeClassAllocator64 { // chache-line aligned. template <const uptr kSpaceBeg, const u64 kSpaceSize, const uptr kMetadataSize, class SizeClassMap, + const uptr kRegionSizeLog, + class ByteMap, class MapUnmapCallback = NoOpMapUnmapCallback> class SizeClassAllocator32 { public: typedef typename SizeClassMap::TransferBatch Batch; typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize, - SizeClassMap, MapUnmapCallback> ThisT; + SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT; typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache; void Init() { - state_ = reinterpret_cast<State *>(MapWithCallback(sizeof(State))); + possible_regions.TestOnlyInit(); + internal_memset(size_class_info_array, 0, sizeof(size_class_info_array)); } void *MapWithCallback(uptr size) { @@ -469,6 +628,7 @@ class SizeClassAllocator32 { MapUnmapCallback().OnMap((uptr)res, size); return res; } + void UnmapWithCallback(uptr beg, uptr size) { MapUnmapCallback().OnUnmap(beg, size); UnmapOrDie(reinterpret_cast<void *>(beg), size); @@ -490,22 +650,24 @@ class SizeClassAllocator32 { return reinterpret_cast<void*>(meta); } - Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) { + NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c, + uptr class_id) { CHECK_LT(class_id, kNumClasses); SizeClassInfo *sci = GetSizeClassInfo(class_id); SpinMutexLock l(&sci->mutex); if (sci->free_list.empty()) - PopulateFreeList(c, sci, class_id); + PopulateFreeList(stat, c, sci, class_id); CHECK(!sci->free_list.empty()); Batch *b = sci->free_list.front(); sci->free_list.pop_front(); return b; } - void NOINLINE DeallocateBatch(uptr class_id, Batch *b) { + NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) { CHECK_LT(class_id, kNumClasses); SizeClassInfo *sci = GetSizeClassInfo(class_id); SpinMutexLock l(&sci->mutex); + CHECK_GT(b->count, 0); sci->free_list.push_front(b); } @@ -514,7 +676,7 @@ class SizeClassAllocator32 { } uptr GetSizeClass(void *p) { - return state_->possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))]; + return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))]; } void *GetBlockBegin(void *p) { @@ -539,16 +701,48 @@ class SizeClassAllocator32 { // No need to lock here. 
uptr res = 0; for (uptr i = 0; i < kNumPossibleRegions; i++) - if (state_->possible_regions[i]) + if (possible_regions[i]) res += kRegionSize; return res; } void TestOnlyUnmap() { for (uptr i = 0; i < kNumPossibleRegions; i++) - if (state_->possible_regions[i]) + if (possible_regions[i]) UnmapWithCallback((i * kRegionSize), kRegionSize); - UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State)); + } + + // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone + // introspection API. + void ForceLock() { + for (uptr i = 0; i < kNumClasses; i++) { + GetSizeClassInfo(i)->mutex.Lock(); + } + } + + void ForceUnlock() { + for (int i = kNumClasses - 1; i >= 0; i--) { + GetSizeClassInfo(i)->mutex.Unlock(); + } + } + + // Iterate over existing chunks. May include chunks that are not currently + // allocated to the user (e.g. freed). + // The caller is expected to call ForceLock() before calling this function. + template<typename Callable> + void ForEachChunk(const Callable &callback) { + for (uptr region = 0; region < kNumPossibleRegions; region++) + if (possible_regions[region]) { + uptr chunk_size = SizeClassMap::Size(possible_regions[region]); + uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize); + uptr region_beg = region * kRegionSize; + for (uptr p = region_beg; + p < region_beg + max_chunks_in_region * chunk_size; + p += chunk_size) { + // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p)); + callback((void *)p); + } + } } void PrintStats() { @@ -558,7 +752,6 @@ class SizeClassAllocator32 { static const uptr kNumClasses = SizeClassMap::kNumClasses; private: - static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20; static const uptr kRegionSize = 1 << kRegionSizeLog; static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize; @@ -579,31 +772,32 @@ class SizeClassAllocator32 { return mem & ~(kRegionSize - 1); } - uptr AllocateRegion(uptr class_id) { + uptr AllocateRegion(AllocatorStats *stat, uptr class_id) { CHECK_LT(class_id, kNumClasses); uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize, "SizeClassAllocator32")); MapUnmapCallback().OnMap(res, kRegionSize); + stat->Add(AllocatorStatMmapped, kRegionSize); CHECK_EQ(0U, (res & (kRegionSize - 1))); - CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]); - state_->possible_regions[ComputeRegionId(res)] = class_id; + possible_regions.set(ComputeRegionId(res), class_id); return res; } SizeClassInfo *GetSizeClassInfo(uptr class_id) { CHECK_LT(class_id, kNumClasses); - return &state_->size_class_info_array[class_id]; + return &size_class_info_array[class_id]; } - void PopulateFreeList(AllocatorCache *c, SizeClassInfo *sci, uptr class_id) { + void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c, + SizeClassInfo *sci, uptr class_id) { uptr size = SizeClassMap::Size(class_id); - uptr reg = AllocateRegion(class_id); + uptr reg = AllocateRegion(stat, class_id); uptr n_chunks = kRegionSize / (size + kMetadataSize); uptr max_count = SizeClassMap::MaxCached(class_id); Batch *b = 0; for (uptr i = reg; i < reg + n_chunks * size; i += size) { if (b == 0) { - if (class_id < SizeClassMap::kMinBatchClass) + if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch))); else b = (Batch*)i; @@ -611,19 +805,19 @@ class SizeClassAllocator32 { } b->batch[b->count++] = (void*)i; if (b->count == max_count) { + CHECK_GT(b->count, 0); sci->free_list.push_back(b); b = 0; } } - if (b) + 
if (b) { + CHECK_GT(b->count, 0); sci->free_list.push_back(b); + } } - struct State { - u8 possible_regions[kNumPossibleRegions]; - SizeClassInfo size_class_info_array[kNumClasses]; - }; - State *state_; + ByteMap possible_regions; + SizeClassInfo size_class_info_array[kNumClasses]; }; // Objects of this type should be used as local caches for SizeClassAllocator64 @@ -634,14 +828,22 @@ struct SizeClassAllocatorLocalCache { typedef SizeClassAllocator Allocator; static const uptr kNumClasses = SizeClassAllocator::kNumClasses; - // Don't need to call Init if the object is a global (i.e. zero-initialized). - void Init() { - internal_memset(this, 0, sizeof(*this)); + void Init(AllocatorGlobalStats *s) { + stats_.Init(); + if (s) + s->Register(&stats_); + } + + void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) { + Drain(allocator); + if (s) + s->Unregister(&stats_); } void *Allocate(SizeClassAllocator *allocator, uptr class_id) { CHECK_NE(class_id, 0UL); CHECK_LT(class_id, kNumClasses); + stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id)); PerClass *c = &per_class_[class_id]; if (UNLIKELY(c->count == 0)) Refill(allocator, class_id); @@ -653,7 +855,12 @@ struct SizeClassAllocatorLocalCache { void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) { CHECK_NE(class_id, 0UL); CHECK_LT(class_id, kNumClasses); + // If the first allocator call on a new thread is a deallocation, then + // max_count will be zero, leading to check failure. + InitCache(); + stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id)); PerClass *c = &per_class_[class_id]; + CHECK_NE(c->max_count, 0UL); if (UNLIKELY(c->count == c->max_count)) Drain(allocator, class_id); c->batch[c->count++] = p; @@ -676,9 +883,10 @@ struct SizeClassAllocatorLocalCache { void *batch[2 * SizeClassMap::kMaxNumCached]; }; PerClass per_class_[kNumClasses]; + AllocatorStats stats_; void InitCache() { - if (per_class_[0].max_count) + if (per_class_[1].max_count) return; for (uptr i = 0; i < kNumClasses; i++) { PerClass *c = &per_class_[i]; @@ -686,22 +894,23 @@ struct SizeClassAllocatorLocalCache { } } - void NOINLINE Refill(SizeClassAllocator *allocator, uptr class_id) { + NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) { InitCache(); PerClass *c = &per_class_[class_id]; - Batch *b = allocator->AllocateBatch(this, class_id); + Batch *b = allocator->AllocateBatch(&stats_, this, class_id); + CHECK_GT(b->count, 0); for (uptr i = 0; i < b->count; i++) c->batch[i] = b->batch[i]; c->count = b->count; - if (class_id < SizeClassMap::kMinBatchClass) + if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b); } - void NOINLINE Drain(SizeClassAllocator *allocator, uptr class_id) { + NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) { InitCache(); PerClass *c = &per_class_[class_id]; Batch *b; - if (class_id < SizeClassMap::kMinBatchClass) + if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch))); else b = (Batch*)c->batch[0]; @@ -712,7 +921,8 @@ struct SizeClassAllocatorLocalCache { } b->count = cnt; c->count -= cnt; - allocator->DeallocateBatch(class_id, b); + CHECK_GT(b->count, 0); + allocator->DeallocateBatch(&stats_, class_id, b); } }; @@ -727,7 +937,7 @@ class LargeMmapAllocator { page_size_ = GetPageSizeCached(); } - void *Allocate(uptr size, uptr alignment) { + void *Allocate(AllocatorStats *stat, uptr size, uptr 
alignment) { CHECK(IsPowerOfTwo(alignment)); uptr map_size = RoundUpMapSize(size); if (alignment > page_size_) @@ -746,7 +956,7 @@ class LargeMmapAllocator { h->size = size; h->map_beg = map_beg; h->map_size = map_size; - uptr size_log = SANITIZER_WORDSIZE - __builtin_clzl(map_size) - 1; + uptr size_log = MostSignificantSetBitIndex(map_size); CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log)); { SpinMutexLock l(&mutex_); @@ -758,11 +968,13 @@ class LargeMmapAllocator { stats.currently_allocated += map_size; stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated); stats.by_size_log[size_log]++; + stat->Add(AllocatorStatMalloced, map_size); + stat->Add(AllocatorStatMmapped, map_size); } return reinterpret_cast<void*>(res); } - void Deallocate(void *p) { + void Deallocate(AllocatorStats *stat, void *p) { Header *h = GetHeader(p); { SpinMutexLock l(&mutex_); @@ -774,6 +986,8 @@ class LargeMmapAllocator { n_chunks_--; stats.n_frees++; stats.currently_allocated -= h->map_size; + stat->Add(AllocatorStatFreed, h->map_size); + stat->Add(AllocatorStatUnmapped, h->map_size); } MapUnmapCallback().OnUnmap(h->map_beg, h->map_size); UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size); @@ -822,7 +1036,7 @@ class LargeMmapAllocator { CHECK_GE(nearest_chunk, h->map_beg); CHECK_LT(nearest_chunk, h->map_beg + h->map_size); CHECK_LE(nearest_chunk, p); - if (h->map_beg + h->map_size < p) + if (h->map_beg + h->map_size <= p) return 0; return GetUser(h); } @@ -840,6 +1054,25 @@ class LargeMmapAllocator { Printf("\n"); } + // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone + // introspection API. + void ForceLock() { + mutex_.Lock(); + } + + void ForceUnlock() { + mutex_.Unlock(); + } + + // Iterate over existing chunks. May include chunks that are not currently + // allocated to the user (e.g. freed). + // The caller is expected to call ForceLock() before calling this function. 
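The lock-then-iterate contract stated in the comment above recurs in every allocator layer this patch touches. A sketch of how an introspection client might drive it; only ForceLock(), ForEachChunk() and ForceUnlock() come from the patch, the visitor type and helper below are illustrative assumptions:

// Illustrative caller of the introspection hooks added in this patch.
// CountingVisitor and CountAllChunks are not part of the runtime.
struct CountingVisitor {
  // ForEachChunk takes a const reference, so operator() must be const;
  // 'count' is mutable for exactly that reason.
  mutable unsigned long count;
  void operator()(void *chunk) const { (void)chunk; ++count; }
};

template <class AnyAllocator>
unsigned long CountAllChunks(AnyAllocator *a) {
  CountingVisitor v = { 0 };
  a->ForceLock();        // must be taken before ForEachChunk()
  a->ForEachChunk(v);    // may visit freed chunks as well as live ones
  a->ForceUnlock();      // release once iteration is finished
  return v.count;
}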
+ template<typename Callable> + void ForEachChunk(const Callable &callback) { + for (uptr i = 0; i < n_chunks_; i++) + callback(GetUser(chunks_[i])); + } + private: static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18); struct Header { @@ -886,6 +1119,7 @@ class CombinedAllocator { void Init() { primary_.Init(); secondary_.Init(); + stats_.Init(); } void *Allocate(AllocatorCache *cache, uptr size, uptr alignment, @@ -901,7 +1135,7 @@ class CombinedAllocator { if (primary_.CanAllocate(size, alignment)) res = cache->Allocate(&primary_, primary_.ClassID(size)); else - res = secondary_.Allocate(size, alignment); + res = secondary_.Allocate(&stats_, size, alignment); if (alignment > 8) CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); if (cleared && res) @@ -914,7 +1148,7 @@ class CombinedAllocator { if (primary_.PointerIsMine(p)) cache->Deallocate(&primary_, primary_.GetSizeClass(p), p); else - secondary_.Deallocate(p); + secondary_.Deallocate(&stats_, p); } void *Reallocate(AllocatorCache *cache, void *p, uptr new_size, @@ -969,20 +1203,57 @@ class CombinedAllocator { void TestOnlyUnmap() { primary_.TestOnlyUnmap(); } + void InitCache(AllocatorCache *cache) { + cache->Init(&stats_); + } + + void DestroyCache(AllocatorCache *cache) { + cache->Destroy(&primary_, &stats_); + } + void SwallowCache(AllocatorCache *cache) { cache->Drain(&primary_); } + void GetStats(AllocatorStatCounters s) const { + stats_.Get(s); + } + void PrintStats() { primary_.PrintStats(); secondary_.PrintStats(); } + // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone + // introspection API. + void ForceLock() { + primary_.ForceLock(); + secondary_.ForceLock(); + } + + void ForceUnlock() { + secondary_.ForceUnlock(); + primary_.ForceUnlock(); + } + + // Iterate over existing chunks. May include chunks that are not currently + // allocated to the user (e.g. freed). + // The caller is expected to call ForceLock() before calling this function. + template<typename Callable> + void ForEachChunk(const Callable &callback) { + primary_.ForEachChunk(callback); + secondary_.ForEachChunk(callback); + } + private: PrimaryAllocator primary_; SecondaryAllocator secondary_; + AllocatorGlobalStats stats_; }; +// Returns true if calloc(size, n) should return 0 due to overflow in size*n. 
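The helper declared just below lets calloc() reject requests where size * n wraps around. A sketch of the standard arithmetic, under a hypothetical name so it is not mistaken for the runtime's actual definition:

// Sketch only: true when size * n cannot be represented in a uptr.
// The real CallocShouldReturnNullDueToOverflow is defined elsewhere in
// compiler-rt; this stand-in just shows the check involved.
static bool CallocWouldOverflow(uptr size, uptr n) {
  if (size == 0 || n == 0)
    return false;               // zero bytes can never overflow
  uptr max = (uptr)-1;          // largest representable value
  return n > max / size;        // equivalent to size * n > max
}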
+bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n); + } // namespace __sanitizer #endif // SANITIZER_ALLOCATOR_H diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h index 7f73df3bd455..30158b49683c 100644 --- a/lib/sanitizer_common/sanitizer_atomic_clang.h +++ b/lib/sanitizer_common/sanitizer_atomic_clang.h @@ -113,9 +113,9 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a, template<typename T> INLINE bool atomic_compare_exchange_weak(volatile T *a, - typename T::Type *cmp, - typename T::Type xchg, - memory_order mo) { + typename T::Type *cmp, + typename T::Type xchg, + memory_order mo) { return atomic_compare_exchange_strong(a, cmp, xchg, mo); } diff --git a/lib/sanitizer_common/sanitizer_atomic_msvc.h b/lib/sanitizer_common/sanitizer_atomic_msvc.h index 58a6a20ec9c5..dc22ef05e589 100644 --- a/lib/sanitizer_common/sanitizer_atomic_msvc.h +++ b/lib/sanitizer_common/sanitizer_atomic_msvc.h @@ -134,6 +134,27 @@ INLINE u16 atomic_exchange(volatile atomic_uint16_t *a, return v; } +INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a, + u8 *cmp, + u8 xchgv, + memory_order mo) { + (void)mo; + DCHECK(!((uptr)a % sizeof(*a))); + u8 cmpv = *cmp; + u8 prev; + __asm { + mov al, cmpv + mov ecx, a + mov dl, xchgv + lock cmpxchg [ecx], dl + mov prev, al + } + if (prev == cmpv) + return true; + *cmp = prev; + return false; +} + INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, uptr *cmp, uptr xchg, @@ -149,9 +170,9 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, template<typename T> INLINE bool atomic_compare_exchange_weak(volatile T *a, - typename T::Type *cmp, - typename T::Type xchg, - memory_order mo) { + typename T::Type *cmp, + typename T::Type xchg, + memory_order mo) { return atomic_compare_exchange_strong(a, cmp, xchg, mo); } diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc index 4a8d9a749bf8..abbe5f92d1a9 100644 --- a/lib/sanitizer_common/sanitizer_common.cc +++ b/lib/sanitizer_common/sanitizer_common.cc @@ -16,6 +16,9 @@ namespace __sanitizer { +const char *SanitizerToolName = "SanitizerTool"; +uptr SanitizerVerbosity = 0; + uptr GetPageSizeCached() { static uptr PageSize; if (!PageSize) @@ -28,11 +31,11 @@ static bool log_to_file = false; // Set to true by __sanitizer_set_report_path // By default, dump to stderr. If |log_to_file| is true and |report_fd_pid| // isn't equal to the current PID, try to obtain file descriptor by opening // file "report_path_prefix.<PID>". -static fd_t report_fd = kStderrFd; +fd_t report_fd = kStderrFd; static char report_path_prefix[4096]; // Set via __sanitizer_set_report_path. // PID of process that opened |report_fd|. If a fork() occurs, the PID of the // child thread will be different from |report_fd_pid|. 
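For context, the file-logging machinery being reworked here is driven by the existing __sanitizer_set_report_path() entry point: once a prefix is set, each process writes to prefix.<PID>, which is why the PID is tracked alongside the descriptor below. A minimal usage sketch; declaring the prototype by hand is an assumption made to keep the example self-contained:

// Usage sketch: send sanitizer reports to files named "san-report.<PID>"
// instead of stderr. The hand-written prototype stands in for whichever
// interface header a given release ships; that choice is an assumption.
extern "C" void __sanitizer_set_report_path(const char *path_prefix);

int main() {
  __sanitizer_set_report_path("san-report");
  // From here on, reports go to "san-report.<pid of this process>".
  return 0;
}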
-static int report_fd_pid = 0; +static uptr report_fd_pid = 0; static void (*DieCallback)(void); void SetDieCallback(void (*callback)(void)) { @@ -43,7 +46,7 @@ void NORETURN Die() { if (DieCallback) { DieCallback(); } - Exit(1); + internal__exit(1); } static CheckFailedCallbackType CheckFailedCallback; @@ -61,29 +64,24 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond, Die(); } -static void MaybeOpenReportFile() { - if (!log_to_file || (report_fd_pid == GetPid())) return; - char report_path_full[4096]; - internal_snprintf(report_path_full, sizeof(report_path_full), - "%s.%d", report_path_prefix, GetPid()); - fd_t fd = internal_open(report_path_full, true); - if (fd == kInvalidFd) { +void MaybeOpenReportFile() { + if (!log_to_file || (report_fd_pid == internal_getpid())) return; + InternalScopedBuffer<char> report_path_full(4096); + internal_snprintf(report_path_full.data(), report_path_full.size(), + "%s.%d", report_path_prefix, internal_getpid()); + uptr openrv = OpenFile(report_path_full.data(), true); + if (internal_iserror(openrv)) { report_fd = kStderrFd; log_to_file = false; - Report("ERROR: Can't open file: %s\n", report_path_full); + Report("ERROR: Can't open file: %s\n", report_path_full.data()); Die(); } if (report_fd != kInvalidFd) { // We're in the child. Close the parent's log. internal_close(report_fd); } - report_fd = fd; - report_fd_pid = GetPid(); -} - -bool PrintsToTty() { - MaybeOpenReportFile(); - return internal_isatty(report_fd); + report_fd = openrv; + report_fd_pid = internal_getpid(); } void RawWrite(const char *buffer) { @@ -105,8 +103,9 @@ uptr ReadFileToBuffer(const char *file_name, char **buff, *buff_size = 0; // The files we usually open are not seekable, so try different buffer sizes. for (uptr size = kMinFileLen; size <= max_len; size *= 2) { - fd_t fd = internal_open(file_name, /*write*/ false); - if (fd == kInvalidFd) return 0; + uptr openrv = OpenFile(file_name, /*write*/ false); + if (internal_iserror(openrv)) return 0; + fd_t fd = openrv; UnmapOrDie(*buff, *buff_size); *buff = (char*)MmapOrDie(size, __FUNCTION__); *buff_size = size; @@ -128,45 +127,15 @@ uptr ReadFileToBuffer(const char *file_name, char **buff, return read_len; } -// We don't want to use std::sort to avoid including <algorithm>, as -// we may end up with two implementation of std::sort - one in instrumented -// code, and the other in runtime. -// qsort() from stdlib won't work as it calls malloc(), which results -// in deadlock in ASan allocator. -// We re-implement in-place sorting w/o recursion as straightforward heapsort. +typedef bool UptrComparisonFunction(const uptr &a, const uptr &b); + +template<class T> +static inline bool CompareLess(const T &a, const T &b) { + return a < b; +} + void SortArray(uptr *array, uptr size) { - if (size < 2) - return; - // Stage 1: insert elements to the heap. - for (uptr i = 1; i < size; i++) { - uptr j, p; - for (j = i; j > 0; j = p) { - p = (j - 1) / 2; - if (array[j] > array[p]) - Swap(array[j], array[p]); - else - break; - } - } - // Stage 2: swap largest element with the last one, - // and sink the new top. 
- for (uptr i = size - 1; i > 0; i--) { - Swap(array[0], array[i]); - uptr j, max_ind; - for (j = 0; j < i; j = max_ind) { - uptr left = 2 * j + 1; - uptr right = 2 * j + 2; - max_ind = j; - if (left < i && array[left] > array[max_ind]) - max_ind = left; - if (right < i && array[right] > array[max_ind]) - max_ind = right; - if (max_ind != j) - Swap(array[j], array[max_ind]); - else - break; - } - } + InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess); } // We want to map a chunk of address space aligned to 'alignment'. @@ -190,6 +159,16 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) { return (void*)res; } +void ReportErrorSummary(const char *error_type, const char *file, + int line, const char *function) { + const int kMaxSize = 1024; // We don't want a summary too long. + InternalScopedBuffer<char> buff(kMaxSize); + internal_snprintf(buff.data(), kMaxSize, "%s: %s %s:%d %s", + SanitizerToolName, error_type, + file ? file : "??", line, function ? function : "??"); + __sanitizer_report_error_summary(buff.data()); +} + } // namespace __sanitizer using namespace __sanitizer; // NOLINT @@ -222,4 +201,8 @@ void NOINLINE __sanitizer_sandbox_on_notify(void *reserved) { (void)reserved; PrepareForSandboxing(); } + +void __sanitizer_report_error_summary(const char *error_summary) { + Printf("SUMMARY: %s\n", error_summary); +} } // extern "C" diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h index 1d002398c785..d800360169fb 100644 --- a/lib/sanitizer_common/sanitizer_common.h +++ b/lib/sanitizer_common/sanitizer_common.h @@ -17,8 +17,11 @@ #define SANITIZER_COMMON_H #include "sanitizer_internal_defs.h" +#include "sanitizer_libc.h" +#include "sanitizer_mutex.h" namespace __sanitizer { +struct StackTrace; // Constants. const uptr kWordSize = SANITIZER_WORDSIZE / 8; @@ -30,15 +33,19 @@ const uptr kCacheLineSize = 128; const uptr kCacheLineSize = 64; #endif +extern const char *SanitizerToolName; // Can be changed by the tool. +extern uptr SanitizerVerbosity; + uptr GetPageSize(); uptr GetPageSizeCached(); uptr GetMmapGranularity(); // Threads -int GetPid(); uptr GetTid(); uptr GetThreadSelf(); void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom); +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size); // Memory management void *MmapOrDie(uptr size, const char *mem_type); @@ -104,7 +111,12 @@ bool PrintsToTty(); void Printf(const char *format, ...); void Report(const char *format, ...); void SetPrintfAndReportCallback(void (*callback)(const char *)); +// Can be used to prevent mixing error reports from different sanitizers. +extern StaticSpinMutex CommonSanitizerReportMutex; +void MaybeOpenReportFile(); +extern fd_t report_fd; +uptr OpenFile(const char *filename, bool write); // Opens the file 'file_name" and reads up to 'max_len' bytes. // The resulting buffer is mmaped and stored in '*buff'. 
// The size of the mmaped region is stored in '*buff_size', @@ -121,21 +133,26 @@ void DisableCoreDumper(); void DumpProcessMap(); bool FileExists(const char *filename); const char *GetEnv(const char *name); +bool SetEnv(const char *name, const char *value); const char *GetPwd(); +u32 GetUid(); void ReExec(); bool StackSizeIsUnlimited(); void SetStackSizeLimitInBytes(uptr limit); void PrepareForSandboxing(); +void InitTlsSize(); +uptr GetTlsSize(); + // Other void SleepForSeconds(int seconds); void SleepForMillis(int millis); +u64 NanoTime(); int Atexit(void (*function)(void)); void SortArray(uptr *array, uptr size); // Exit void NORETURN Abort(); -void NORETURN Exit(int exitcode); void NORETURN Die(); void NORETURN SANITIZER_INTERFACE_ATTRIBUTE CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); @@ -154,20 +171,79 @@ typedef void (*CheckFailedCallbackType)(const char *, int, const char *, u64, u64); void SetCheckFailedCallback(CheckFailedCallbackType callback); +// Construct a one-line string like +// SanitizerToolName: error_type file:line function +// and call __sanitizer_report_error_summary on it. +void ReportErrorSummary(const char *error_type, const char *file, + int line, const char *function); + // Math +#if SANITIZER_WINDOWS && !defined(__clang__) +extern "C" { +unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT +unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT +#if defined(_WIN64) +unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT +unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT +#endif +} +#endif + +INLINE uptr MostSignificantSetBitIndex(uptr x) { + CHECK_NE(x, 0U); + unsigned long up; // NOLINT +#if !SANITIZER_WINDOWS || defined(__clang__) + up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x); +#elif defined(_WIN64) + _BitScanReverse64(&up, x); +#else + _BitScanReverse(&up, x); +#endif + return up; +} + INLINE bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; } + +INLINE uptr RoundUpToPowerOfTwo(uptr size) { + CHECK(size); + if (IsPowerOfTwo(size)) return size; + + uptr up = MostSignificantSetBitIndex(size); + CHECK(size < (1ULL << (up + 1))); + CHECK(size > (1ULL << up)); + return 1UL << (up + 1); +} + INLINE uptr RoundUpTo(uptr size, uptr boundary) { CHECK(IsPowerOfTwo(boundary)); return (size + boundary - 1) & ~(boundary - 1); } + INLINE uptr RoundDownTo(uptr x, uptr boundary) { return x & ~(boundary - 1); } + INLINE bool IsAligned(uptr a, uptr alignment) { return (a & (alignment - 1)) == 0; } + +INLINE uptr Log2(uptr x) { + CHECK(IsPowerOfTwo(x)); +#if !SANITIZER_WINDOWS || defined(__clang__) + return __builtin_ctzl(x); +#elif defined(_WIN64) + unsigned long ret; // NOLINT + _BitScanForward64(&ret, x); + return ret; +#else + unsigned long ret; // NOLINT + _BitScanForward(&ret, x); + return ret; +#endif +} + // Don't use std::min, std::max or std::swap, to minimize dependency // on libstdc++. template<class T> T Min(T a, T b) { return a < b ? a : b; } @@ -196,6 +272,113 @@ INLINE int ToLower(int c) { # define FIRST_32_SECOND_64(a, b) (a) #endif +// A low-level vector based on mmap. May incur a significant memory overhead for +// small vectors. +// WARNING: The current implementation supports only POD types. 
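A usage sketch for the mmap-backed vector described above and defined just below; note that push_back() grows the backing store to RoundUpToPowerOfTwo(size + 1), so capacity doubles once the initial reservation is exhausted, and the element type must stay POD as the warning says. The function name is illustrative only:

// Illustrative use of InternalVector<T>; assumes the surrounding
// __sanitizer definitions (uptr, MmapOrDie, ...) are available.
void CollectSquares() {
  InternalVector<uptr> v(/*initial_capacity*/ 4);  // one mmap up front
  for (uptr i = 0; i < 10; i++)
    v.push_back(i * i);          // remaps to capacity 8, then 16
  uptr last = v.back();          // 81
  v.pop_back();                  // size drops to 9, capacity stays 16
  (void)last;
}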
+template<typename T> +class InternalVector { + public: + explicit InternalVector(uptr initial_capacity) { + CHECK_GT(initial_capacity, 0); + capacity_ = initial_capacity; + size_ = 0; + data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalVector"); + } + ~InternalVector() { + UnmapOrDie(data_, capacity_ * sizeof(T)); + } + T &operator[](uptr i) { + CHECK_LT(i, size_); + return data_[i]; + } + const T &operator[](uptr i) const { + CHECK_LT(i, size_); + return data_[i]; + } + void push_back(const T &element) { + CHECK_LE(size_, capacity_); + if (size_ == capacity_) { + uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1); + Resize(new_capacity); + } + data_[size_++] = element; + } + T &back() { + CHECK_GT(size_, 0); + return data_[size_ - 1]; + } + void pop_back() { + CHECK_GT(size_, 0); + size_--; + } + uptr size() const { + return size_; + } + const T *data() const { + return data_; + } + uptr capacity() const { + return capacity_; + } + + private: + void Resize(uptr new_capacity) { + CHECK_GT(new_capacity, 0); + CHECK_LE(size_, new_capacity); + T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T), + "InternalVector"); + internal_memcpy(new_data, data_, size_ * sizeof(T)); + T *old_data = data_; + data_ = new_data; + UnmapOrDie(old_data, capacity_ * sizeof(T)); + capacity_ = new_capacity; + } + // Disallow evil constructors. + InternalVector(const InternalVector&); + void operator=(const InternalVector&); + + T *data_; + uptr capacity_; + uptr size_; +}; + +// HeapSort for arrays and InternalVector. +template<class Container, class Compare> +void InternalSort(Container *v, uptr size, Compare comp) { + if (size < 2) + return; + // Stage 1: insert elements to the heap. + for (uptr i = 1; i < size; i++) { + uptr j, p; + for (j = i; j > 0; j = p) { + p = (j - 1) / 2; + if (comp((*v)[p], (*v)[j])) + Swap((*v)[j], (*v)[p]); + else + break; + } + } + // Stage 2: swap largest element with the last one, + // and sink the new top. 
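// Worked example (editorial comment, not part of the patch): for
// v = {3, 9, 5} stage 1 sifts 9 up to build the max-heap {9, 3, 5}.
// Stage 2 below then swaps the root with the last unsorted slot and
// sinks the new root: {5, 3 | 9} after the first pass, {3 | 5, 9}
// after the second, so the array ends up sorted ascending by comp.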
+ for (uptr i = size - 1; i > 0; i--) { + Swap((*v)[0], (*v)[i]); + uptr j, max_ind; + for (j = 0; j < i; j = max_ind) { + uptr left = 2 * j + 1; + uptr right = 2 * j + 2; + max_ind = j; + if (left < i && comp((*v)[max_ind], (*v)[left])) + max_ind = left; + if (right < i && comp((*v)[max_ind], (*v)[right])) + max_ind = right; + if (max_ind != j) + Swap((*v)[j], (*v)[max_ind]); + else + break; + } + } +} + } // namespace __sanitizer #endif // SANITIZER_COMMON_H diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc index 8bc2e8b5c292..8c0fb55f3ce9 100644 --- a/lib/sanitizer_common/sanitizer_common_interceptors.inc +++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc @@ -24,9 +24,97 @@ #include <stdarg.h> +#if SANITIZER_WINDOWS +#define va_copy(dst, src) ((dst) = (src)) +#endif // _WIN32 + +#if SANITIZER_INTERCEPT_STRCASECMP +static inline int CharCaseCmp(unsigned char c1, unsigned char c2) { + int c1_low = ToLower(c1); + int c2_low = ToLower(c2); + return c1_low - c2_low; +} + +INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strcasecmp, s1, s2); + unsigned char c1 = 0, c2 = 0; + uptr i; + for (i = 0; ; i++) { + c1 = (unsigned char)s1[i]; + c2 = (unsigned char)s2[i]; + if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') + break; + } + COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, i + 1); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, i + 1); + return CharCaseCmp(c1, c2); +} + +INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, n); + unsigned char c1 = 0, c2 = 0; + uptr i; + for (i = 0; i < n; i++) { + c1 = (unsigned char)s1[i]; + c2 = (unsigned char)s2[i]; + if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') + break; + } + COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, n)); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, n)); + return CharCaseCmp(c1, c2); +} + +#define INIT_STRCASECMP INTERCEPT_FUNCTION(strcasecmp) +#define INIT_STRNCASECMP INTERCEPT_FUNCTION(strncasecmp) +#else +#define INIT_STRCASECMP +#define INIT_STRNCASECMP +#endif + +#if SANITIZER_INTERCEPT_FREXP +INTERCEPTOR(double, frexp, double x, int *exp) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp); + double res = REAL(frexp)(x, exp); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp)); + return res; +} + +#define INIT_FREXP INTERCEPT_FUNCTION(frexp); +#else +#define INIT_FREXP +#endif // SANITIZER_INTERCEPT_FREXP + +#if SANITIZER_INTERCEPT_FREXPF_FREXPL +INTERCEPTOR(float, frexpf, float x, int *exp) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp); + float res = REAL(frexpf)(x, exp); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp)); + return res; +} + +INTERCEPTOR(long double, frexpl, long double x, int *exp) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp); + long double res = REAL(frexpl)(x, exp); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp)); + return res; +} + +#define INIT_FREXPF_FREXPL \ + INTERCEPT_FUNCTION(frexpf); \ + INTERCEPT_FUNCTION(frexpl) +#else +#define INIT_FREXPF_FREXPL +#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL + #if SANITIZER_INTERCEPT_READ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) { - void* ctx; + void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count); SSIZE_T res = REAL(read)(fd, ptr, count); if (res > 0) @@ -35,14 +123,14 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) { 
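The read() interceptor whose hunk begins here, like every other interceptor in this file, follows the same shape: build a context, announce entry, forward to the real function, then tell the tool which bytes were actually read or written. A schematic with stubbed-out names; none of these are the real interception macros:

// Schematic of the interceptor pattern used throughout this file.
// ENTER/WRITE_RANGE and real_read are placeholders; actual interceptors
// use INTERCEPTOR(), COMMON_INTERCEPTOR_* and REAL() instead.
#define ENTER(...)        do { } while (0)                  // entry bookkeeping
#define WRITE_RANGE(p, n) do { (void)(p); (void)(n); } while (0)

extern "C" long real_read(int fd, void *buf, unsigned long count) {
  (void)fd; (void)buf;
  return (long)count;            // trivial stand-in for the libc function
}

long intercepted_read(int fd, void *buf, unsigned long count) {
  ENTER(fd, buf, count);               // 1. record the call and its arguments
  long res = real_read(fd, buf, count);  // 2. let the real read() do the work
  if (res > 0)
    WRITE_RANGE(buf, res);             // 3. only bytes actually read become
  return res;                          //    "defined" from the tool's view
}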
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } -# define INIT_READ INTERCEPT_FUNCTION(read) +#define INIT_READ INTERCEPT_FUNCTION(read) #else -# define INIT_READ +#define INIT_READ #endif #if SANITIZER_INTERCEPT_PREAD INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) { - void* ctx; + void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset); SSIZE_T res = REAL(pread)(fd, ptr, count, offset); if (res > 0) @@ -51,14 +139,14 @@ INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) { COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } -# define INIT_PREAD INTERCEPT_FUNCTION(pread) +#define INIT_PREAD INTERCEPT_FUNCTION(pread) #else -# define INIT_PREAD +#define INIT_PREAD #endif #if SANITIZER_INTERCEPT_PREAD64 INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) { - void* ctx; + void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset); SSIZE_T res = REAL(pread64)(fd, ptr, count, offset); if (res > 0) @@ -67,14 +155,14 @@ INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) { COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); return res; } -# define INIT_PREAD64 INTERCEPT_FUNCTION(pread64) +#define INIT_PREAD64 INTERCEPT_FUNCTION(pread64) #else -# define INIT_PREAD64 +#define INIT_PREAD64 #endif #if SANITIZER_INTERCEPT_WRITE INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) { - void* ctx; + void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); @@ -83,142 +171,821 @@ INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) { COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res); return res; } -# define INIT_WRITE INTERCEPT_FUNCTION(write) +#define INIT_WRITE INTERCEPT_FUNCTION(write) #else -# define INIT_WRITE +#define INIT_WRITE #endif #if SANITIZER_INTERCEPT_PWRITE -INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count) { - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count); +INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); - SSIZE_T res = REAL(pwrite)(fd, ptr, count); + SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset); if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res); return res; } -# define INIT_PWRITE INTERCEPT_FUNCTION(pwrite) +#define INIT_PWRITE INTERCEPT_FUNCTION(pwrite) #else -# define INIT_PWRITE +#define INIT_PWRITE #endif #if SANITIZER_INTERCEPT_PWRITE64 -INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count) { - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count); +INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count, + OFF64_T offset) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset); if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); - SSIZE_T res = REAL(pwrite64)(fd, ptr, count); + SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset); if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res); return res; } -# define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64) +#define INIT_PWRITE64 INTERCEPT_FUNCTION(pwrite64) #else -# define INIT_PWRITE64 +#define INIT_PWRITE64 #endif #if SANITIZER_INTERCEPT_PRCTL INTERCEPTOR(int, prctl, int option, - unsigned long arg2, unsigned long arg3, // NOLINT - unsigned long arg4, unsigned long arg5) { // NOLINT - void* ctx; + unsigned long arg2, unsigned long arg3, // NOLINT + unsigned long arg4, 
unsigned long arg5) { // NOLINT + void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5); static const int PR_SET_NAME = 15; int res = REAL(prctl(option, arg2, arg3, arg4, arg5)); if (option == PR_SET_NAME) { char buff[16]; - internal_strncpy(buff, (char*)arg2, 15); + internal_strncpy(buff, (char *)arg2, 15); buff[15] = 0; COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff); } return res; } -# define INIT_PRCTL INTERCEPT_FUNCTION(prctl) +#define INIT_PRCTL INTERCEPT_FUNCTION(prctl) #else -# define INIT_PRCTL -#endif // SANITIZER_INTERCEPT_PRCTL +#define INIT_PRCTL +#endif // SANITIZER_INTERCEPT_PRCTL +#if SANITIZER_INTERCEPT_TIME +INTERCEPTOR(unsigned long, time, unsigned long *t) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, time, t); + unsigned long res = REAL(time)(t); + if (t && res != (unsigned long)-1) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t)); + } + return res; +} +#define INIT_TIME \ + INTERCEPT_FUNCTION(time); +#else +#define INIT_TIME +#endif // SANITIZER_INTERCEPT_TIME + + +#if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS +INTERCEPTOR(void *, localtime, unsigned long *timep) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, localtime, timep); + void *res = REAL(localtime)(timep); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz); + } + return res; +} +INTERCEPTOR(void *, localtime_r, unsigned long *timep, void *result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, localtime_r, timep, result); + void *res = REAL(localtime_r)(timep, result); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz); + } + return res; +} +INTERCEPTOR(void *, gmtime, unsigned long *timep) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gmtime, timep); + void *res = REAL(gmtime)(timep); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz); + } + return res; +} +INTERCEPTOR(void *, gmtime_r, unsigned long *timep, void *result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gmtime_r, timep, result); + void *res = REAL(gmtime_r)(timep, result); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_tm_sz); + } + return res; +} +INTERCEPTOR(char *, ctime, unsigned long *timep) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep); + char *res = REAL(ctime)(timep); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); + } + return res; +} +INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result); + char *res = REAL(ctime_r)(timep, result); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); + } + return res; +} +INTERCEPTOR(char *, asctime, void *tm) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm); + char *res = REAL(asctime)(tm); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, struct_tm_sz); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); + } + return res; +} +INTERCEPTOR(char *, asctime_r, void *tm, char *result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result); + char *res = REAL(asctime_r)(tm, result); + if (res) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, 
struct_tm_sz); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); + } + return res; +} +#define INIT_LOCALTIME_AND_FRIENDS \ + INTERCEPT_FUNCTION(localtime); \ + INTERCEPT_FUNCTION(localtime_r); \ + INTERCEPT_FUNCTION(gmtime); \ + INTERCEPT_FUNCTION(gmtime_r); \ + INTERCEPT_FUNCTION(ctime); \ + INTERCEPT_FUNCTION(ctime_r); \ + INTERCEPT_FUNCTION(asctime); \ + INTERCEPT_FUNCTION(asctime_r); +#else +#define INIT_LOCALTIME_AND_FRIENDS +#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS + #if SANITIZER_INTERCEPT_SCANF #include "sanitizer_common_interceptors_scanf.inc" -INTERCEPTOR(int, vscanf, const char *format, va_list ap) { // NOLINT - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, vscanf, format, ap); - scanf_common(ctx, format, ap); - int res = REAL(vscanf)(format, ap); // NOLINT +#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \ + { \ + void *ctx; \ + COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \ + va_list aq; \ + va_copy(aq, ap); \ + int res = REAL(vname)(__VA_ARGS__); \ + if (res > 0) \ + scanf_common(ctx, res, allowGnuMalloc, format, aq); \ + va_end(aq); \ + return res; \ + } + +INTERCEPTOR(int, vscanf, const char *format, va_list ap) +VSCANF_INTERCEPTOR_IMPL(vscanf, true, format, ap) + +INTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap) +VSCANF_INTERCEPTOR_IMPL(vsscanf, true, str, format, ap) + +INTERCEPTOR(int, vfscanf, void *stream, const char *format, va_list ap) +VSCANF_INTERCEPTOR_IMPL(vfscanf, true, stream, format, ap) + +#if SANITIZER_INTERCEPT_ISOC99_SCANF +INTERCEPTOR(int, __isoc99_vscanf, const char *format, va_list ap) +VSCANF_INTERCEPTOR_IMPL(__isoc99_vscanf, false, format, ap) + +INTERCEPTOR(int, __isoc99_vsscanf, const char *str, const char *format, + va_list ap) +VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap) + +INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap) +VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap) +#endif // SANITIZER_INTERCEPT_ISOC99_SCANF + +#define SCANF_INTERCEPTOR_IMPL(name, vname, ...) \ + { \ + void *ctx; \ + COMMON_INTERCEPTOR_ENTER(ctx, name, __VA_ARGS__); \ + va_list ap; \ + va_start(ap, format); \ + int res = vname(__VA_ARGS__, ap); \ + va_end(ap); \ + return res; \ + } + +INTERCEPTOR(int, scanf, const char *format, ...) +SCANF_INTERCEPTOR_IMPL(scanf, vscanf, format) + +INTERCEPTOR(int, fscanf, void *stream, const char *format, ...) +SCANF_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format) + +INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) +SCANF_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format) + +#if SANITIZER_INTERCEPT_ISOC99_SCANF +INTERCEPTOR(int, __isoc99_scanf, const char *format, ...) +SCANF_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format) + +INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...) +SCANF_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format) + +INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...) 
+SCANF_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format) +#endif + +#define INIT_SCANF \ + INTERCEPT_FUNCTION(scanf); \ + INTERCEPT_FUNCTION(sscanf); \ + INTERCEPT_FUNCTION(fscanf); \ + INTERCEPT_FUNCTION(vscanf); \ + INTERCEPT_FUNCTION(vsscanf); \ + INTERCEPT_FUNCTION(vfscanf); \ + INTERCEPT_FUNCTION(__isoc99_scanf); \ + INTERCEPT_FUNCTION(__isoc99_sscanf); \ + INTERCEPT_FUNCTION(__isoc99_fscanf); \ + INTERCEPT_FUNCTION(__isoc99_vscanf); \ + INTERCEPT_FUNCTION(__isoc99_vsscanf); \ + INTERCEPT_FUNCTION(__isoc99_vfscanf); + +#else +#define INIT_SCANF +#endif + +#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS +INTERCEPTOR(void *, getpwnam, const char *name) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name); + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + void *res = REAL(getpwnam)(name); + if (res != 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz); + return res; +} +INTERCEPTOR(void *, getpwuid, u32 uid) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid); + void *res = REAL(getpwuid)(uid); + if (res != 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz); + return res; +} +INTERCEPTOR(void *, getgrnam, const char *name) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name); + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + void *res = REAL(getgrnam)(name); + if (res != 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz); return res; } +INTERCEPTOR(void *, getgrgid, u32 gid) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid); + void *res = REAL(getgrgid)(gid); + if (res != 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_group_sz); + return res; +} +#define INIT_GETPWNAM_AND_FRIENDS \ + INTERCEPT_FUNCTION(getpwnam); \ + INTERCEPT_FUNCTION(getpwuid); \ + INTERCEPT_FUNCTION(getgrnam); \ + INTERCEPT_FUNCTION(getgrgid); +#else +#define INIT_GETPWNAM_AND_FRIENDS +#endif + -INTERCEPTOR(int, vsscanf, const char *str, const char *format, // NOLINT - va_list ap) { - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, vsscanf, str, format, ap); - scanf_common(ctx, format, ap); - int res = REAL(vsscanf)(str, format, ap); // NOLINT - // FIXME: read of str +#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS +INTERCEPTOR(int, getpwnam_r, const char *name, void *pwd, + char *buf, SIZE_T buflen, void **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result); + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result); + if (!res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); + } return res; } +INTERCEPTOR(int, getpwuid_r, u32 uid, void *pwd, + char *buf, SIZE_T buflen, void **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result); + int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result); + if (!res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, struct_passwd_sz); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); + } + return res; +} +INTERCEPTOR(int, getgrnam_r, const char *name, void *grp, + char *buf, SIZE_T buflen, void **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result); + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + int res = REAL(getgrnam_r)(name, grp, buf, buflen, result); + if (!res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz); + 
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); + } + return res; +} +INTERCEPTOR(int, getgrgid_r, u32 gid, void *grp, + char *buf, SIZE_T buflen, void **result) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result); + int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result); + if (!res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, struct_group_sz); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, buflen); + } + return res; +} +#define INIT_GETPWNAM_R_AND_FRIENDS \ + INTERCEPT_FUNCTION(getpwnam_r); \ + INTERCEPT_FUNCTION(getpwuid_r); \ + INTERCEPT_FUNCTION(getgrnam_r); \ + INTERCEPT_FUNCTION(getgrgid_r); +#else +#define INIT_GETPWNAM_R_AND_FRIENDS +#endif + -INTERCEPTOR(int, vfscanf, void *stream, const char *format, // NOLINT - va_list ap) { - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, vfscanf, stream, format, ap); - scanf_common(ctx, format, ap); - int res = REAL(vfscanf)(stream, format, ap); // NOLINT +#if SANITIZER_INTERCEPT_CLOCK_GETTIME +INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp); + int res = REAL(clock_getres)(clk_id, tp); + if (!res && tp) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz); + } + return res; +} +INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp); + int res = REAL(clock_gettime)(clk_id, tp); + if (!res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz); + } return res; } +INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp); + COMMON_INTERCEPTOR_READ_RANGE(ctx, tp, struct_timespec_sz); + return REAL(clock_settime)(clk_id, tp); +} +#define INIT_CLOCK_GETTIME \ + INTERCEPT_FUNCTION(clock_getres); \ + INTERCEPT_FUNCTION(clock_gettime); \ + INTERCEPT_FUNCTION(clock_settime); +#else +#define INIT_CLOCK_GETTIME +#endif -INTERCEPTOR(int, scanf, const char *format, ...) { // NOLINT - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, scanf, format); - va_list ap; - va_start(ap, format); - int res = vscanf(format, ap); // NOLINT - va_end(ap); + +#if SANITIZER_INTERCEPT_GETITIMER +INTERCEPTOR(int, getitimer, int which, void *curr_value) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value); + int res = REAL(getitimer)(which, curr_value); + if (!res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz); + } + return res; +} +INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value); + COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz); + int res = REAL(setitimer)(which, new_value, old_value); + if (!res && old_value) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz); + } return res; } +#define INIT_GETITIMER \ + INTERCEPT_FUNCTION(getitimer); \ + INTERCEPT_FUNCTION(setitimer); +#else +#define INIT_GETITIMER +#endif + + +#if SANITIZER_INTERCEPT_GLOB +struct sanitizer_glob_t { + SIZE_T gl_pathc; + char **gl_pathv; +}; + +static void unpoison_glob_t(void *ctx, sanitizer_glob_t *pglob) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob)); + // +1 for NULL pointer at the end. 
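// Concrete case (editorial comment): with gl_pathc == 2 the vector is
// { path0, path1, NULL }, so (gl_pathc + 1) pointer slots must be marked
// as written below, followed by each path's bytes including the
// terminating NUL.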
+ COMMON_INTERCEPTOR_WRITE_RANGE( + ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv)); + for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) { + char *p = pglob->gl_pathv[i]; + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1); + } +} -INTERCEPTOR(int, fscanf, void* stream, const char *format, ...) { // NOLINT - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, fscanf, stream, format); - va_list ap; - va_start(ap, format); - int res = vfscanf(stream, format, ap); // NOLINT - va_end(ap); +INTERCEPTOR(int, glob, const char *pattern, int flags, + int (*errfunc)(const char *epath, int eerrno), + sanitizer_glob_t *pglob) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob); + int res = REAL(glob)(pattern, flags, errfunc, pglob); + if (res == 0) + unpoison_glob_t(ctx, pglob); return res; } -INTERCEPTOR(int, sscanf, const char *str, const char *format, ...) { // NOLINT - void* ctx; - COMMON_INTERCEPTOR_ENTER(ctx, sscanf, str, format); // NOLINT - va_list ap; - va_start(ap, format); - int res = vsscanf(str, format, ap); // NOLINT - va_end(ap); +INTERCEPTOR(int, glob64, const char *pattern, int flags, + int (*errfunc)(const char *epath, int eerrno), + sanitizer_glob_t *pglob) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob); + int res = REAL(glob64)(pattern, flags, errfunc, pglob); + if (res == 0) + unpoison_glob_t(ctx, pglob); + return res; +} +#define INIT_GLOB \ + INTERCEPT_FUNCTION(glob); \ + INTERCEPT_FUNCTION(glob64); +#else // SANITIZER_INTERCEPT_GLOB +#define INIT_GLOB +#endif // SANITIZER_INTERCEPT_GLOB + + +#if SANITIZER_INTERCEPT_WAIT +// According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version +// suffixes on Darwin. See the declaration of INTERCEPTOR_WITH_SUFFIX for +// details. 
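The value of the wait-family interceptors that follow is easiest to see from the caller's side: the kernel, not the program, fills in *status, so without the WRITE_RANGE calls a tool that tracks initialization would flag perfectly valid code. An ordinary usage example, not part of the runtime:

// Plain user code: status is written by the kernel inside waitpid(),
// which is exactly what the interceptor reports to the tool via
// COMMON_INTERCEPTOR_WRITE_RANGE.
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main() {
  pid_t pid = fork();
  if (pid == 0)
    _exit(7);                              // child: exit with a known code
  int status;                              // deliberately left uninitialized
  if (waitpid(pid, &status, 0) == pid && WIFEXITED(status))
    printf("child exited with %d\n", WEXITSTATUS(status));
  return 0;
}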
+INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wait, status); + int res = REAL(wait)(status); + if (res != -1 && status) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); + return res; +} +INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop, + int options) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options); + int res = REAL(waitid)(idtype, id, infop, options); + if (res != -1 && infop) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz); return res; } +INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options); + int res = REAL(waitpid)(pid, status, options); + if (res != -1 && status) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); + return res; +} +INTERCEPTOR(int, wait3, int *status, int options, void *rusage) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage); + int res = REAL(wait3)(status, options, rusage); + if (res != -1) { + if (status) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); + if (rusage) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz); + } + return res; +} +INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage); + int res = REAL(wait4)(pid, status, options, rusage); + if (res != -1) { + if (status) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status)); + if (rusage) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz); + } + return res; +} +#define INIT_WAIT \ + INTERCEPT_FUNCTION(wait); \ + INTERCEPT_FUNCTION(waitid); \ + INTERCEPT_FUNCTION(waitpid); \ + INTERCEPT_FUNCTION(wait3); \ + INTERCEPT_FUNCTION(wait4); +#else +#define INIT_WAIT +#endif -#define INIT_SCANF \ - INTERCEPT_FUNCTION(scanf); \ - INTERCEPT_FUNCTION(sscanf); /* NOLINT */ \ - INTERCEPT_FUNCTION(fscanf); \ - INTERCEPT_FUNCTION(vscanf); \ - INTERCEPT_FUNCTION(vsscanf); \ - INTERCEPT_FUNCTION(vfscanf) +#if SANITIZER_INTERCEPT_INET +INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, inet_ntop, af, src, dst, size); + uptr sz = __sanitizer_in_addr_sz(af); + if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz); + // FIXME: figure out read size based on the address family. + char *res = REAL(inet_ntop)(af, src, dst, size); + if (res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); + return res; +} +INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst); + // FIXME: figure out read size based on the address family. 
+ int res = REAL(inet_pton)(af, src, dst); + if (res == 1) { + uptr sz = __sanitizer_in_addr_sz(af); + if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz); + } + return res; +} +#define INIT_INET \ + INTERCEPT_FUNCTION(inet_ntop); \ + INTERCEPT_FUNCTION(inet_pton); +#else +#define INIT_INET +#endif +#if SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM +INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param); + int res = REAL(pthread_getschedparam)(thread, policy, param); + if (res == 0) { + if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy)); + if (param) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, sizeof(*param)); + } + return res; +} +#define INIT_PTHREAD_GETSCHEDPARAM INTERCEPT_FUNCTION(pthread_getschedparam); #else -#define INIT_SCANF +#define INIT_PTHREAD_GETSCHEDPARAM +#endif + +#if SANITIZER_INTERCEPT_GETADDRINFO +INTERCEPTOR(int, getaddrinfo, char *node, char *service, + struct __sanitizer_addrinfo *hints, + struct __sanitizer_addrinfo **out) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out); + if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1); + if (service) + COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1); + if (hints) + COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo)); + int res = REAL(getaddrinfo)(node, service, hints, out); + if (res == 0) { + struct __sanitizer_addrinfo *p = *out; + while (p) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_addrinfo)); + if (p->ai_addr) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, struct_sockaddr_sz); + if (p->ai_canonname) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname, + REAL(strlen)(p->ai_canonname) + 1); + p = p->ai_next; + } + } + return res; +} +#define INIT_GETADDRINFO INTERCEPT_FUNCTION(getaddrinfo); +#else +#define INIT_GETADDRINFO +#endif + +#if SANITIZER_INTERCEPT_GETSOCKNAME +INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen); + COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen)); + int addrlen_in = *addrlen; + int res = REAL(getsockname)(sock_fd, addr, addrlen); + if (res == 0) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen)); + } + return res; +} +#define INIT_GETSOCKNAME INTERCEPT_FUNCTION(getsockname); +#else +#define INIT_GETSOCKNAME +#endif + +#if SANITIZER_INTERCEPT_GETHOSTBYNAME || SANITIZER_INTERCEPT_GETHOSTBYNAME_R +static void write_hostent(void *ctx, struct __sanitizer_hostent *h) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent)); + if (h->h_name) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1); + char **p = h->h_aliases; + while (*p) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1); + ++p; + } + COMMON_INTERCEPTOR_WRITE_RANGE( + ctx, h->h_aliases, (p - h->h_aliases + 1) * sizeof(*h->h_aliases)); + p = h->h_addr_list; + while (*p) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, h->h_length); + ++p; + } + COMMON_INTERCEPTOR_WRITE_RANGE( + ctx, h->h_addr_list, (p - h->h_addr_list + 1) * sizeof(*h->h_addr_list)); +} +#endif + +#if SANITIZER_INTERCEPT_GETHOSTBYNAME +INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname, char *name) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname, name); + struct __sanitizer_hostent *res = REAL(gethostbyname)(name); + if 
(res) write_hostent(ctx, res); + return res; +} + +INTERCEPTOR(struct __sanitizer_hostent *, gethostbyaddr, void *addr, int len, + int type) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr, addr, len, type); + COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len); + struct __sanitizer_hostent *res = REAL(gethostbyaddr)(addr, len, type); + if (res) write_hostent(ctx, res); + return res; +} + +INTERCEPTOR(struct __sanitizer_hostent *, gethostent) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostent); + struct __sanitizer_hostent *res = REAL(gethostent)(); + if (res) write_hostent(ctx, res); + return res; +} + +INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2, name, af); + struct __sanitizer_hostent *res = REAL(gethostbyname2)(name, af); + if (res) write_hostent(ctx, res); + return res; +} +#define INIT_GETHOSTBYNAME \ + INTERCEPT_FUNCTION(gethostent); \ + INTERCEPT_FUNCTION(gethostbyaddr); \ + INTERCEPT_FUNCTION(gethostbyname); \ + INTERCEPT_FUNCTION(gethostbyname2); +#else +#define INIT_GETHOSTBYNAME +#endif + +#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R +INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf, + SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result, + h_errnop); + int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop); + if (res == 0) { + if (result) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); + if (*result) write_hostent(ctx, *result); + } + if (h_errnop) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop)); + } + return res; +} + +INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type, + struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen, + __sanitizer_hostent **result, int *h_errnop) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf, + buflen, result, h_errnop); + COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len); + int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result, + h_errnop); + if (res == 0) { + if (result) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); + if (*result) write_hostent(ctx, *result); + } + if (h_errnop) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop)); + } + return res; +} + +INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret, + char *buf, SIZE_T buflen, __sanitizer_hostent **result, + int *h_errnop) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result, + h_errnop); + int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop); + if (res == 0) { + if (result) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); + if (*result) write_hostent(ctx, *result); + } + if (h_errnop) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop)); + } + return res; +} + +INTERCEPTOR(int, gethostbyname2_r, char *name, int af, + struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen, + __sanitizer_hostent **result, int *h_errnop) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen, + result, h_errnop); + int res = + REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop); + if (res == 0) { + if (result) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result)); + if (*result) write_hostent(ctx, *result); + } + if (h_errnop) + 
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop)); + } + return res; +} +#define INIT_GETHOSTBYNAME_R \ + INTERCEPT_FUNCTION(gethostent_r); \ + INTERCEPT_FUNCTION(gethostbyaddr_r); \ + INTERCEPT_FUNCTION(gethostbyname_r); \ + INTERCEPT_FUNCTION(gethostbyname2_r); +#else +#define INIT_GETHOSTBYNAME_R +#endif + +#if SANITIZER_INTERCEPT_GETSOCKOPT +INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval, + int *optlen) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval, + optlen); + if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen)); + int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen); + if (res == 0) + if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen); + return res; +} +#define INIT_GETSOCKOPT INTERCEPT_FUNCTION(getsockopt); +#else +#define INIT_GETSOCKOPT #endif #define SANITIZER_COMMON_INTERCEPTORS_INIT \ + INIT_STRCASECMP; \ + INIT_STRNCASECMP; \ INIT_READ; \ INIT_PREAD; \ INIT_PREAD64; \ INIT_PRCTL; \ INIT_WRITE; \ - INIT_SCANF; + INIT_PWRITE; \ + INIT_PWRITE64; \ + INIT_LOCALTIME_AND_FRIENDS; \ + INIT_SCANF; \ + INIT_FREXP; \ + INIT_FREXPF_FREXPL; \ + INIT_GETPWNAM_AND_FRIENDS; \ + INIT_GETPWNAM_R_AND_FRIENDS; \ + INIT_CLOCK_GETTIME; \ + INIT_GETITIMER; \ + INIT_TIME; \ + INIT_GLOB; \ + INIT_WAIT; \ + INIT_INET; \ + INIT_PTHREAD_GETSCHEDPARAM; \ + INIT_GETADDRINFO; \ + INIT_GETSOCKNAME; \ + INIT_GETHOSTBYNAME; \ + INIT_GETHOSTBYNAME_R; \ + INIT_GETSOCKOPT; diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc b/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc index 63d67a7115ec..8bb5cd818ac2 100644 --- a/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc +++ b/lib/sanitizer_common/sanitizer_common_interceptors_scanf.inc @@ -8,83 +8,41 @@ //===----------------------------------------------------------------------===// // // Scanf implementation for use in *Sanitizer interceptors. +// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html +// with a few common GNU extensions. // //===----------------------------------------------------------------------===// #include <stdarg.h> -struct ScanfSpec { - char c; - unsigned size; +struct ScanfDirective { + int argIdx; // argument index, or -1 of not specified ("%n$") + int fieldWidth; + bool suppressed; // suppress assignment ("*") + bool allocate; // allocate space ("m") + char lengthModifier[2]; + char convSpecifier; + bool maybeGnuMalloc; }; -// One-letter specs. -static const ScanfSpec scanf_specs[] = { - {'p', sizeof(void *)}, - {'e', sizeof(float)}, - {'E', sizeof(float)}, - {'a', sizeof(float)}, - {'f', sizeof(float)}, - {'g', sizeof(float)}, - {'d', sizeof(int)}, - {'i', sizeof(int)}, - {'o', sizeof(int)}, - {'u', sizeof(int)}, - {'x', sizeof(int)}, - {'X', sizeof(int)}, - {'n', sizeof(int)}, - {'t', sizeof(PTRDIFF_T)}, - {'z', sizeof(SIZE_T)}, - {'j', sizeof(INTMAX_T)}, - {'h', sizeof(short)} -}; - -static const unsigned scanf_specs_cnt = - sizeof(scanf_specs) / sizeof(scanf_specs[0]); - -// %ll?, %L?, %q? specs -static const ScanfSpec scanf_llspecs[] = { - {'e', sizeof(long double)}, - {'f', sizeof(long double)}, - {'g', sizeof(long double)}, - {'d', sizeof(long long)}, - {'i', sizeof(long long)}, - {'o', sizeof(long long)}, - {'u', sizeof(long long)}, - {'x', sizeof(long long)} -}; - -static const unsigned scanf_llspecs_cnt = - sizeof(scanf_llspecs) / sizeof(scanf_llspecs[0]); - -// %l? 
specs -static const ScanfSpec scanf_lspecs[] = { - {'e', sizeof(double)}, - {'f', sizeof(double)}, - {'g', sizeof(double)}, - {'d', sizeof(long)}, - {'i', sizeof(long)}, - {'o', sizeof(long)}, - {'u', sizeof(long)}, - {'x', sizeof(long)}, - {'X', sizeof(long)}, -}; - -static const unsigned scanf_lspecs_cnt = - sizeof(scanf_lspecs) / sizeof(scanf_lspecs[0]); - -static unsigned match_spec(const struct ScanfSpec *spec, unsigned n, char c) { - for (unsigned i = 0; i < n; ++i) - if (spec[i].c == c) - return spec[i].size; - return 0; +static const char *parse_number(const char *p, int *out) { + *out = internal_atoll(p); + while (*p >= '0' && *p <= '9') + ++p; + return p; } -static void scanf_common(void *ctx, const char *format, va_list ap_const) { - va_list aq; - va_copy(aq, ap_const); +static bool char_is_one_of(char c, const char *s) { + return !!internal_strchr(s, c); +} - const char *p = format; - unsigned size; +// Parse scanf format string. If a valid directive in encountered, it is +// returned in dir. This function returns the pointer to the first +// unprocessed character, or 0 in case of error. +// In case of the end-of-string, a pointer to the closing \0 is returned. +static const char *scanf_parse_next(const char *p, bool allowGnuMalloc, + ScanfDirective *dir) { + internal_memset(dir, 0, sizeof(*dir)); + dir->argIdx = -1; while (*p) { if (*p != '%') { @@ -92,51 +50,260 @@ static void scanf_common(void *ctx, const char *format, va_list ap_const) { continue; } ++p; - if (*p == '*' || *p == '%' || *p == 0) { + // %% + if (*p == '%') { ++p; continue; } - if (*p == '0' || (*p >= '1' && *p <= '9')) { - size = internal_atoll(p); - // +1 for the \0 at the end - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size + 1); + if (*p == '\0') { + return 0; + } + // %n$ + if (*p >= '0' && *p <= '9') { + int number; + const char *q = parse_number(p, &number); + if (*q == '$') { + dir->argIdx = number; + p = q + 1; + } + // Otherwise, do not change p. This will be re-parsed later as the field + // width. + } + // * + if (*p == '*') { + dir->suppressed = true; ++p; - continue; } - - if (*p == 'L' || *p == 'q') { + // Field width. + if (*p >= '0' && *p <= '9') { + p = parse_number(p, &dir->fieldWidth); + if (dir->fieldWidth <= 0) + return 0; + } + // m + if (*p == 'm') { + dir->allocate = true; ++p; - size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p); - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size); - continue; } - - if (*p == 'l') { + // Length modifier. + if (char_is_one_of(*p, "jztLq")) { + dir->lengthModifier[0] = *p; + ++p; + } else if (*p == 'h') { + dir->lengthModifier[0] = 'h'; + ++p; + if (*p == 'h') { + dir->lengthModifier[1] = 'h'; + ++p; + } + } else if (*p == 'l') { + dir->lengthModifier[0] = 'l'; ++p; if (*p == 'l') { + dir->lengthModifier[1] = 'l'; ++p; - size = match_spec(scanf_llspecs, scanf_llspecs_cnt, *p); - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size); - continue; - } else { - size = match_spec(scanf_lspecs, scanf_lspecs_cnt, *p); - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size); - continue; } } + // Conversion specifier. + dir->convSpecifier = *p++; + // Consume %[...] expression. + if (dir->convSpecifier == '[') { + if (*p == '^') + ++p; + if (*p == ']') + ++p; + while (*p && *p != ']') + ++p; + if (*p == 0) + return 0; // unexpected end of string + // Consume the closing ']'. + ++p; + } + // This is unfortunately ambiguous between old GNU extension + // of %as, %aS and %a[...] 
and newer POSIX %a followed by + // letters s, S or [. + if (allowGnuMalloc && dir->convSpecifier == 'a' && + !dir->lengthModifier[0]) { + if (*p == 's' || *p == 'S') { + dir->maybeGnuMalloc = true; + ++p; + } else if (*p == '[') { + // Watch for %a[h-j%d], if % appears in the + // [...] range, then we need to give up, we don't know + // if scanf will parse it as POSIX %a [h-j %d ] or + // GNU allocation of string with range dh-j plus %. + const char *q = p + 1; + if (*q == '^') + ++q; + if (*q == ']') + ++q; + while (*q && *q != ']' && *q != '%') + ++q; + if (*q == 0 || *q == '%') + return 0; + p = q + 1; // Consume the closing ']'. + dir->maybeGnuMalloc = true; + } + } + break; + } + return p; +} - if (*p == 'h' && *(p + 1) == 'h') { - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), sizeof(char)); - p += 2; - continue; +// Returns true if the character is an integer conversion specifier. +static bool scanf_is_integer_conv(char c) { + return char_is_one_of(c, "diouxXn"); +} + +// Returns true if the character is an floating point conversion specifier. +static bool scanf_is_float_conv(char c) { + return char_is_one_of(c, "aAeEfFgG"); +} + +// Returns string output character size for string-like conversions, +// or 0 if the conversion is invalid. +static int scanf_get_char_size(ScanfDirective *dir) { + if (char_is_one_of(dir->convSpecifier, "CS")) { + // wchar_t + return 0; + } + + if (char_is_one_of(dir->convSpecifier, "cs[")) { + if (dir->lengthModifier[0] == 'l') + // wchar_t + return 0; + else if (dir->lengthModifier[0] == 0) + return sizeof(char); + else + return 0; + } + + return 0; +} + +enum ScanfStoreSize { + // Store size not known in advance; can be calculated as strlen() of the + // destination buffer. + SSS_STRLEN = -1, + // Invalid conversion specifier. + SSS_INVALID = 0 +}; + +// Returns the store size of a scanf directive (if >0), or a value of +// ScanfStoreSize. +static int scanf_get_store_size(ScanfDirective *dir) { + if (dir->allocate) { + if (!char_is_one_of(dir->convSpecifier, "cCsS[")) + return SSS_INVALID; + return sizeof(char *); + } + + if (dir->maybeGnuMalloc) { + if (dir->convSpecifier != 'a' || dir->lengthModifier[0]) + return SSS_INVALID; + // This is ambiguous, so check the smaller size of char * (if it is + // a GNU extension of %as, %aS or %a[...]) and float (if it is + // POSIX %a followed by s, S or [ letters). + return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float); + } + + if (scanf_is_integer_conv(dir->convSpecifier)) { + switch (dir->lengthModifier[0]) { + case 'h': + return dir->lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short); + case 'l': + return dir->lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long); + case 'L': + return sizeof(long long); + case 'j': + return sizeof(INTMAX_T); + case 'z': + return sizeof(SIZE_T); + case 't': + return sizeof(PTRDIFF_T); + case 0: + return sizeof(int); + default: + return SSS_INVALID; } + } - size = match_spec(scanf_specs, scanf_specs_cnt, *p); - if (size) { - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, va_arg(aq, void *), size); - ++p; + if (scanf_is_float_conv(dir->convSpecifier)) { + switch (dir->lengthModifier[0]) { + case 'L': + case 'q': + return sizeof(long double); + case 'l': + return dir->lengthModifier[1] == 'l' ? 
sizeof(long double) + : sizeof(double); + case 0: + return sizeof(float); + default: + return SSS_INVALID; + } + } + + if (char_is_one_of(dir->convSpecifier, "sS[")) { + unsigned charSize = scanf_get_char_size(dir); + if (charSize == 0) + return SSS_INVALID; + if (dir->fieldWidth == 0) + return SSS_STRLEN; + return (dir->fieldWidth + 1) * charSize; + } + + if (char_is_one_of(dir->convSpecifier, "cC")) { + unsigned charSize = scanf_get_char_size(dir); + if (charSize == 0) + return SSS_INVALID; + if (dir->fieldWidth == 0) + return charSize; + return dir->fieldWidth * charSize; + } + + if (dir->convSpecifier == 'p') { + if (dir->lengthModifier[1] != 0) + return SSS_INVALID; + return sizeof(void *); + } + + return SSS_INVALID; +} + +// Common part of *scanf interceptors. +// Process format string and va_list, and report all store ranges. +// Stops when "consuming" n_inputs input items. +static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc, + const char *format, va_list aq) { + CHECK_GT(n_inputs, 0); + const char *p = format; + + while (*p && n_inputs) { + ScanfDirective dir; + p = scanf_parse_next(p, allowGnuMalloc, &dir); + if (!p) + break; + if (dir.convSpecifier == 0) { + // This can only happen at the end of the format string. + CHECK_EQ(*p, 0); + break; + } + // Here the directive is valid. Do what it says. + if (dir.argIdx != -1) { + // Unsupported. + break; + } + if (dir.suppressed) continue; + int size = scanf_get_store_size(&dir); + if (size == SSS_INVALID) + break; + void *argp = va_arg(aq, void *); + if (dir.convSpecifier != 'n') + --n_inputs; + if (size == SSS_STRLEN) { + size = internal_strlen((const char *)argp) + 1; } + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size); } - va_end(aq); } diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc new file mode 100644 index 000000000000..36f6cf0bc0db --- /dev/null +++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc @@ -0,0 +1,23 @@ +//===-- sanitizer_common_libcdep.cc ---------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common.h" + +namespace __sanitizer { + +bool PrintsToTty() { + MaybeOpenReportFile(); + return internal_isatty(report_fd); +} + +} // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_common_syscalls.inc b/lib/sanitizer_common/sanitizer_common_syscalls.inc new file mode 100644 index 000000000000..da25e6b6ad2c --- /dev/null +++ b/lib/sanitizer_common/sanitizer_common_syscalls.inc @@ -0,0 +1,148 @@ +//===-- sanitizer_common_syscalls.inc ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Common syscalls handlers for tools like AddressSanitizer, +// ThreadSanitizer, MemorySanitizer, etc. 
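To make the rewritten scanf_common() above concrete, a worked example of the store ranges it reports (illustration only, not part of the commit; it assumes the interceptor passes the real scanf's return value as n_inputs):

// scanf("%2d %5s %*d %lf", &i, buf, &d) returns 3, so scanf_common() is
// called with n_inputs == 3 and reports:
//   %2d  -> sizeof(int) bytes at &i         (integer conversion, no length modifier)
//   %5s  -> (5 + 1) * sizeof(char) at buf   (fieldWidth + 1 for the terminating NUL)
//   %*d  -> assignment suppressed: no argument consumed, n_inputs not decremented
//   %lf  -> sizeof(double) bytes at &d      ('l' length modifier on a float conversion)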
+// +// This file should be included into the tool's interceptor file, +// which has to define it's own macros: +// COMMON_SYSCALL_PRE_READ_RANGE +// Called in prehook for regions that will be read by the kernel and +// must be initialized. +// COMMON_SYSCALL_PRE_WRITE_RANGE +// Called in prehook for regions that will be written to by the kernel +// and must be addressable. The actual write range may be smaller than +// reported in the prehook. See POST_WRITE_RANGE. +// COMMON_SYSCALL_POST_READ_RANGE +// Called in posthook for regions that were read by the kernel. Does +// not make much sense. +// COMMON_SYSCALL_POST_WRITE_RANGE +// Called in posthook for regions that were written to by the kernel +// and are now initialized. +//===----------------------------------------------------------------------===// + +#define PRE_SYSCALL(name) \ + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_##name +#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s) +#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) + +#define POST_SYSCALL(name) \ + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_##name +#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s) +#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s) + +// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such). + +extern "C" { +struct sanitizer_kernel_iovec { + void *iov_base; + unsigned long iov_len; +}; + +struct sanitizer_kernel_msghdr { + void *msg_name; + int msg_namelen; + struct sanitizer_kernel_iovec *msg_iov; + unsigned long msg_iovlen; + void *msg_control; + unsigned long msg_controllen; + unsigned msg_flags; +}; + +struct sanitizer_kernel_timeval { + long tv_sec; + long tv_usec; +}; + +struct sanitizer_kernel_rusage { + struct sanitizer_kernel_timeval ru_timeval[2]; + long ru_long[14]; +}; + +PRE_SYSCALL(recvmsg)(int sockfd, struct sanitizer_kernel_msghdr *msg, + int flags) { + PRE_READ(msg, sizeof(*msg)); +} + +POST_SYSCALL(recvmsg)(long res, int sockfd, struct sanitizer_kernel_msghdr *msg, + int flags) { + if (res > 0) + for (unsigned long i = 0; i < msg->msg_iovlen; ++i) { + POST_WRITE(msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); + } + POST_WRITE(msg->msg_control, msg->msg_controllen); +} + +PRE_SYSCALL(rt_sigpending)(void *p, unsigned long s) { PRE_WRITE(p, s); } + +POST_SYSCALL(rt_sigpending)(long res, void *p, unsigned long s) { + if (res == 0) { + POST_WRITE(p, s); + } +} + +PRE_SYSCALL(getdents)(int fd, void *dirp, int count) { PRE_WRITE(dirp, count); } + +POST_SYSCALL(getdents)(long res, int fd, void *dirp, int count) { + if (res > 0) { + POST_WRITE(dirp, res); + } +} + +PRE_SYSCALL(getdents64)(int fd, void *dirp, int count) { + PRE_WRITE(dirp, count); +} + +POST_SYSCALL(getdents64)(long res, int fd, void *dirp, int count) { + if (res > 0) { + POST_WRITE(dirp, res); + } +} + +PRE_SYSCALL(wait4)(int pid, int *status, int options, + struct sanitizer_kernel_rusage *r) { + if (status) { + PRE_WRITE(status, sizeof(*status)); + } + if (r) { + PRE_WRITE(r, sizeof(*r)); + } +} + +POST_SYSCALL(wait4)(long res, int pid, int *status, int options, + struct sanitizer_kernel_rusage *r) { + if (res > 0) { + if (status) { + POST_WRITE(status, sizeof(*status)); + } + if (r) { + POST_WRITE(r, sizeof(*r)); + } + } +} + +PRE_SYSCALL(waitpid)(int pid, int *status, int options) { + if (status) { + PRE_WRITE(status, sizeof(*status)); + } +} + +POST_SYSCALL(waitpid)(long res, int pid, int *status, int options) { + if (res > 0 && status) { + POST_WRITE(status, 
sizeof(*status)); + } +} +} // extern "C" + +#undef PRE_SYSCALL +#undef PRE_READ +#undef PRE_WRITE +#undef POST_SYSCALL +#undef POST_READ +#undef POST_WRITE diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc index eca910c08090..b7218e5ad212 100644 --- a/lib/sanitizer_common/sanitizer_flags.cc +++ b/lib/sanitizer_common/sanitizer_flags.cc @@ -18,15 +18,35 @@ namespace __sanitizer { +CommonFlags common_flags_dont_use_directly; + +void ParseCommonFlagsFromString(const char *str) { + CommonFlags *f = common_flags(); + ParseFlag(str, &f->malloc_context_size, "malloc_context_size"); + ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix"); + ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal"); + ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc"); + ParseFlag(str, &f->symbolize, "symbolize"); +} + static bool GetFlagValue(const char *env, const char *name, const char **value, int *value_length) { if (env == 0) return false; - const char *pos = internal_strstr(env, name); - const char *end; - if (pos == 0) - return false; + const char *pos = 0; + for (;;) { + pos = internal_strstr(env, name); + if (pos == 0) + return false; + if (pos != env && ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) { + // Seems to be middle of another flag name or value. + env = pos + 1; + continue; + } + break; + } pos += internal_strlen(name); + const char *end; if (pos[0] != '=') { end = pos; } else { @@ -38,7 +58,8 @@ static bool GetFlagValue(const char *env, const char *name, pos += 1; end = internal_strchr(pos, '\''); } else { - end = internal_strchr(pos, ' '); + // Read until the next space or colon. + end = pos + internal_strcspn(pos, " :"); } if (end == 0) end = pos + internal_strlen(pos); diff --git a/lib/sanitizer_common/sanitizer_flags.h b/lib/sanitizer_common/sanitizer_flags.h index b7ce4524b055..e97ce6a87188 100644 --- a/lib/sanitizer_common/sanitizer_flags.h +++ b/lib/sanitizer_common/sanitizer_flags.h @@ -22,6 +22,29 @@ void ParseFlag(const char *env, bool *flag, const char *name); void ParseFlag(const char *env, int *flag, const char *name); void ParseFlag(const char *env, const char **flag, const char *name); +struct CommonFlags { + // If set, use the online symbolizer from common sanitizer runtime. + bool symbolize; + // Path to external symbolizer. + const char *external_symbolizer_path; + // Strips this prefix from file paths in error reports. + const char *strip_path_prefix; + // Use fast (frame-pointer-based) unwinder on fatal errors (if available). + bool fast_unwind_on_fatal; + // Use fast (frame-pointer-based) unwinder on malloc/free (if available). + bool fast_unwind_on_malloc; + // Max number of stack frames kept for each allocation/deallocation. + int malloc_context_size; +}; + +extern CommonFlags common_flags_dont_use_directly; + +inline CommonFlags *common_flags() { + return &common_flags_dont_use_directly; +} + +void ParseCommonFlagsFromString(const char *str); + } // namespace __sanitizer #endif // SANITIZER_FLAGS_H diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h index 7ff27338192a..9a7d374bf5ad 100644 --- a/lib/sanitizer_common/sanitizer_internal_defs.h +++ b/lib/sanitizer_common/sanitizer_internal_defs.h @@ -13,19 +13,109 @@ #ifndef SANITIZER_DEFS_H #define SANITIZER_DEFS_H -#include "sanitizer/common_interface_defs.h" +#include "sanitizer_platform.h" + +#if SANITIZER_WINDOWS +// FIXME find out what we need on Windows. __declspec(dllexport) ? 
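A minimal sketch of how a tool might drive the CommonFlags machinery declared above (hypothetical initialization code, not part of this commit; the flag names and separators follow ParseCommonFlagsFromString() and GetFlagValue() as changed here):

void InitializeCommonFlagsExample() {
  // "name=value" pairs; a value ends at the next space or colon, and quotes
  // may be used to protect values that contain either character.
  ParseCommonFlagsFromString(
      "symbolize=1:malloc_context_size=30 strip_path_prefix='/build/'");
  if (common_flags()->symbolize)
    Printf("symbolizer on, keeping %d frames per allocation\n",
           common_flags()->malloc_context_size);
}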
+# define SANITIZER_INTERFACE_ATTRIBUTE +# define SANITIZER_WEAK_ATTRIBUTE +#elif defined(SANITIZER_GO) +# define SANITIZER_INTERFACE_ATTRIBUTE +# define SANITIZER_WEAK_ATTRIBUTE +#else +# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default"))) +# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak)) +#endif + +#if SANITIZER_LINUX && !defined(SANITIZER_GO) +# define SANITIZER_SUPPORTS_WEAK_HOOKS 1 +#else +# define SANITIZER_SUPPORTS_WEAK_HOOKS 0 +#endif + +// GCC does not understand __has_feature +#if !defined(__has_feature) +# define __has_feature(x) 0 +#endif + +// For portability reasons we do not include stddef.h, stdint.h or any other +// system header, but we do need some basic types that are not defined +// in a portable way by the language itself. +namespace __sanitizer { + +#if defined(_WIN64) +// 64-bit Windows uses LLP64 data model. +typedef unsigned long long uptr; // NOLINT +typedef signed long long sptr; // NOLINT +#else +typedef unsigned long uptr; // NOLINT +typedef signed long sptr; // NOLINT +#endif // defined(_WIN64) +#if defined(__x86_64__) +// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use +// 64-bit pointer to unwind stack frame. +typedef unsigned long long uhwptr; // NOLINT +#else +typedef uptr uhwptr; // NOLINT +#endif +typedef unsigned char u8; +typedef unsigned short u16; // NOLINT +typedef unsigned int u32; +typedef unsigned long long u64; // NOLINT +typedef signed char s8; +typedef signed short s16; // NOLINT +typedef signed int s32; +typedef signed long long s64; // NOLINT +typedef int fd_t; + +// WARNING: OFF_T may be different from OS type off_t, depending on the value of +// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls +// like pread and mmap, as opposed to pread64 and mmap64. +// Mac and Linux/x86-64 are special. +#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__)) +typedef u64 OFF_T; +#else +typedef uptr OFF_T; +#endif +typedef u64 OFF64_T; +} // namespace __sanitizer + +extern "C" { + // Tell the tools to write their reports to "path.<pid>" instead of stderr. + void __sanitizer_set_report_path(const char *path) + SANITIZER_INTERFACE_ATTRIBUTE; + + // Tell the tools to write their reports to given file descriptor instead of + // stderr. + void __sanitizer_set_report_fd(int fd) + SANITIZER_INTERFACE_ATTRIBUTE; + + // Notify the tools that the sandbox is going to be turned on. The reserved + // parameter will be used in the future to hold a structure with functions + // that the tools may call to bypass the sandbox. + void __sanitizer_sandbox_on_notify(void *reserved) + SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; + + // This function is called by the tool when it has just finished reporting + // an error. 'error_summary' is a one-line string that summarizes + // the error message. This function can be overridden by the client. + void __sanitizer_report_error_summary(const char *error_summary) + SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE; +} // extern "C" + + using namespace __sanitizer; // NOLINT // ----------- ATTENTION ------------- // This header should NOT include any other headers to avoid portability issues. // Common defs. -#define INLINE static inline +#define INLINE inline #define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE #define WEAK SANITIZER_WEAK_ATTRIBUTE // Platform-specific defs. 
#if defined(_MSC_VER) -# define ALWAYS_INLINE __declspec(forceinline) +# define ALWAYS_INLINE __forceinline // FIXME(timurrrr): do we need this on Windows? # define ALIAS(x) # define ALIGNED(x) __declspec(align(x)) @@ -40,7 +130,7 @@ using namespace __sanitizer; // NOLINT # define USED # define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ #else // _MSC_VER -# define ALWAYS_INLINE __attribute__((always_inline)) +# define ALWAYS_INLINE inline __attribute__((always_inline)) # define ALIAS(x) __attribute__((alias(x))) # define ALIGNED(x) __attribute__((aligned(x))) # define FORMAT(f, a) __attribute__((format(printf, f, a))) @@ -60,7 +150,7 @@ using namespace __sanitizer; // NOLINT # endif #endif // _MSC_VER -#if defined(_WIN32) +#if SANITIZER_WINDOWS typedef unsigned long DWORD; // NOLINT typedef DWORD thread_return_t; # define THREAD_CALLING_CONV __stdcall @@ -183,10 +273,12 @@ extern "C" void* _ReturnAddress(void); # define GET_CURRENT_FRAME() (uptr)0xDEADBEEF #endif -#define HANDLE_EINTR(res, f) { \ - do { \ - res = (f); \ - } while (res == -1 && errno == EINTR); \ +#define HANDLE_EINTR(res, f) \ + { \ + int rverrno; \ + do { \ + res = (f); \ + } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \ } #endif // SANITIZER_DEFS_H diff --git a/lib/sanitizer_common/sanitizer_lfstack.h b/lib/sanitizer_common/sanitizer_lfstack.h index c26e45db8f89..088413908087 100644 --- a/lib/sanitizer_common/sanitizer_lfstack.h +++ b/lib/sanitizer_common/sanitizer_lfstack.h @@ -68,6 +68,6 @@ struct LFStack { atomic_uint64_t head_; }; -} +} // namespace __sanitizer #endif // #ifndef SANITIZER_LFSTACK_H diff --git a/lib/sanitizer_common/sanitizer_libc.cc b/lib/sanitizer_common/sanitizer_libc.cc index 349be35012dd..20c03c4474a1 100644 --- a/lib/sanitizer_common/sanitizer_libc.cc +++ b/lib/sanitizer_common/sanitizer_libc.cc @@ -206,7 +206,7 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) { } bool mem_is_zero(const char *beg, uptr size) { - CHECK_LE(size, 1UL << FIRST_32_SECOND_64(30, 40)); // Sanity check. + CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check. const char *end = beg + size; uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr)); uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr)); diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h index aa052c654d39..82d809a0305a 100644 --- a/lib/sanitizer_common/sanitizer_libc.h +++ b/lib/sanitizer_common/sanitizer_libc.h @@ -11,14 +11,13 @@ // run-time libraries. // These tools can not use some of the libc functions directly because those // functions are intercepted. Instead, we implement a tiny subset of libc here. -// NOTE: This file may be included into user code. //===----------------------------------------------------------------------===// #ifndef SANITIZER_LIBC_H #define SANITIZER_LIBC_H // ----------- ATTENTION ------------- // This header should NOT include any other headers from sanitizer runtime. -#include "sanitizer/common_interface_defs.h" +#include "sanitizer_internal_defs.h" namespace __sanitizer { @@ -46,6 +45,7 @@ char *internal_strrchr(const char *s, int c); char *internal_strstr(const char *haystack, const char *needle); // Works only for base=10 and doesn't set errno. s64 internal_simple_strtoll(const char *nptr, char **endptr, int base); +int internal_snprintf(char *buffer, uptr length, const char *format, ...); // Return true if all bytes in [mem, mem+size) are zero. // Optimized for the case when the result is true. 
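The rewritten HANDLE_EINTR above reflects a convention introduced throughout this change: internal_* wrappers return a uptr, and callers test it with internal_iserror() instead of comparing against -1 and reading errno. A hedged sketch of the idiom (hypothetical caller; ReExec() and ThreadLister further down use the same pattern):

char buf[4096];
int err;
uptr fd = internal_open("/proc/self/maps", O_RDONLY);
if (internal_iserror(fd, &err)) {
  Report("open failed, errno %d\n", err);
} else {
  uptr len = internal_read(fd, buf, sizeof(buf));
  if (!internal_iserror(len)) {
    // buf[0..len) now holds the first chunk of /proc/self/maps.
  }
  internal_close(fd);
}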
@@ -53,28 +53,46 @@ bool mem_is_zero(const char *mem, uptr size); // Memory -void *internal_mmap(void *addr, uptr length, int prot, int flags, - int fd, u64 offset); -int internal_munmap(void *addr, uptr length); +uptr internal_mmap(void *addr, uptr length, int prot, int flags, + int fd, u64 offset); +uptr internal_munmap(void *addr, uptr length); // I/O -typedef int fd_t; const fd_t kInvalidFd = -1; const fd_t kStdinFd = 0; const fd_t kStdoutFd = 1; const fd_t kStderrFd = 2; -int internal_close(fd_t fd); +uptr internal_close(fd_t fd); int internal_isatty(fd_t fd); -fd_t internal_open(const char *filename, bool write); + +// Use __sanitizer::OpenFile() instead. +uptr internal_open(const char *filename, int flags); +uptr internal_open(const char *filename, int flags, u32 mode); + uptr internal_read(fd_t fd, void *buf, uptr count); uptr internal_write(fd_t fd, const void *buf, uptr count); + +// OS uptr internal_filesize(fd_t fd); // -1 on error. -int internal_dup2(int oldfd, int newfd); +uptr internal_stat(const char *path, void *buf); +uptr internal_lstat(const char *path, void *buf); +uptr internal_fstat(fd_t fd, void *buf); +uptr internal_dup2(int oldfd, int newfd); uptr internal_readlink(const char *path, char *buf, uptr bufsize); -int internal_snprintf(char *buffer, uptr length, const char *format, ...); +uptr internal_unlink(const char *path); +void NORETURN internal__exit(int exitcode); +uptr internal_lseek(fd_t fd, OFF_T offset, int whence); + +uptr internal_ptrace(int request, int pid, void *addr, void *data); +uptr internal_waitpid(int pid, int *status, int options); +uptr internal_getpid(); +uptr internal_getppid(); // Threading -int internal_sched_yield(); +uptr internal_sched_yield(); + +// Error handling +bool internal_iserror(uptr retval, int *rverrno = 0); } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc index 8b9ba38ca777..6e234e5f1e1d 100644 --- a/lib/sanitizer_common/sanitizer_linux.cc +++ b/lib/sanitizer_common/sanitizer_linux.cc @@ -11,20 +11,28 @@ // run-time libraries and implements linux-specific functions from // sanitizer_libc.h. //===----------------------------------------------------------------------===// -#ifdef __linux__ + +#include "sanitizer_platform.h" +#if SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" +#include "sanitizer_linux.h" #include "sanitizer_mutex.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" +#include "sanitizer_symbolizer.h" +#include <asm/param.h> +#include <dlfcn.h> +#include <errno.h> #include <fcntl.h> #include <pthread.h> #include <sched.h> #include <sys/mman.h> +#include <sys/ptrace.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/syscall.h> @@ -32,9 +40,20 @@ #include <sys/types.h> #include <unistd.h> #include <unwind.h> -#include <errno.h> -#include <sys/prctl.h> -#include <linux/futex.h> + +#if !SANITIZER_ANDROID +#include <sys/signal.h> +#endif + +// <linux/time.h> +struct kernel_timeval { + long tv_sec; + long tv_usec; +}; + +// <linux/futex.h> is broken on some linux distributions. +const int FUTEX_WAIT = 0; +const int FUTEX_WAKE = 1; // Are we using 32-bit or 64-bit syscalls? 
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32 @@ -47,130 +66,158 @@ namespace __sanitizer { +#ifdef __x86_64__ +#include "sanitizer_syscall_linux_x86_64.inc" +#else +#include "sanitizer_syscall_generic.inc" +#endif + // --------------- sanitizer_libc.h -void *internal_mmap(void *addr, uptr length, int prot, int flags, +uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, u64 offset) { #if SANITIZER_LINUX_USES_64BIT_SYSCALLS - return (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset); + return internal_syscall(__NR_mmap, addr, length, prot, flags, fd, offset); #else - return (void *)syscall(__NR_mmap2, addr, length, prot, flags, fd, offset); + return internal_syscall(__NR_mmap2, addr, length, prot, flags, fd, offset); #endif } -int internal_munmap(void *addr, uptr length) { - return syscall(__NR_munmap, addr, length); +uptr internal_munmap(void *addr, uptr length) { + return internal_syscall(__NR_munmap, addr, length); +} + +uptr internal_close(fd_t fd) { + return internal_syscall(__NR_close, fd); } -int internal_close(fd_t fd) { - return syscall(__NR_close, fd); +uptr internal_open(const char *filename, int flags) { + return internal_syscall(__NR_open, filename, flags); } -fd_t internal_open(const char *filename, bool write) { - return syscall(__NR_open, filename, +uptr internal_open(const char *filename, int flags, u32 mode) { + return internal_syscall(__NR_open, filename, flags, mode); +} + +uptr OpenFile(const char *filename, bool write) { + return internal_open(filename, write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660); } uptr internal_read(fd_t fd, void *buf, uptr count) { sptr res; - HANDLE_EINTR(res, (sptr)syscall(__NR_read, fd, buf, count)); + HANDLE_EINTR(res, (sptr)internal_syscall(__NR_read, fd, buf, count)); return res; } uptr internal_write(fd_t fd, const void *buf, uptr count) { sptr res; - HANDLE_EINTR(res, (sptr)syscall(__NR_write, fd, buf, count)); + HANDLE_EINTR(res, (sptr)internal_syscall(__NR_write, fd, buf, count)); return res; } -uptr internal_filesize(fd_t fd) { +#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS +static void stat64_to_stat(struct stat64 *in, struct stat *out) { + internal_memset(out, 0, sizeof(*out)); + out->st_dev = in->st_dev; + out->st_ino = in->st_ino; + out->st_mode = in->st_mode; + out->st_nlink = in->st_nlink; + out->st_uid = in->st_uid; + out->st_gid = in->st_gid; + out->st_rdev = in->st_rdev; + out->st_size = in->st_size; + out->st_blksize = in->st_blksize; + out->st_blocks = in->st_blocks; + out->st_atime = in->st_atime; + out->st_mtime = in->st_mtime; + out->st_ctime = in->st_ctime; + out->st_ino = in->st_ino; +} +#endif + +uptr internal_stat(const char *path, void *buf) { #if SANITIZER_LINUX_USES_64BIT_SYSCALLS - struct stat st; - if (syscall(__NR_fstat, fd, &st)) - return -1; + return internal_syscall(__NR_stat, path, buf); #else - struct stat64 st; - if (syscall(__NR_fstat64, fd, &st)) - return -1; + struct stat64 buf64; + int res = internal_syscall(__NR_stat64, path, &buf64); + stat64_to_stat(&buf64, (struct stat *)buf); + return res; #endif +} + +uptr internal_lstat(const char *path, void *buf) { +#if SANITIZER_LINUX_USES_64BIT_SYSCALLS + return internal_syscall(__NR_lstat, path, buf); +#else + struct stat64 buf64; + int res = internal_syscall(__NR_lstat64, path, &buf64); + stat64_to_stat(&buf64, (struct stat *)buf); + return res; +#endif +} + +uptr internal_fstat(fd_t fd, void *buf) { +#if SANITIZER_LINUX_USES_64BIT_SYSCALLS + return internal_syscall(__NR_fstat, fd, buf); +#else 
+ struct stat64 buf64; + int res = internal_syscall(__NR_fstat64, fd, &buf64); + stat64_to_stat(&buf64, (struct stat *)buf); + return res; +#endif +} + +uptr internal_filesize(fd_t fd) { + struct stat st; + if (internal_fstat(fd, &st)) + return -1; return (uptr)st.st_size; } -int internal_dup2(int oldfd, int newfd) { - return syscall(__NR_dup2, oldfd, newfd); +uptr internal_dup2(int oldfd, int newfd) { + return internal_syscall(__NR_dup2, oldfd, newfd); } uptr internal_readlink(const char *path, char *buf, uptr bufsize) { - return (uptr)syscall(__NR_readlink, path, buf, bufsize); + return internal_syscall(__NR_readlink, path, buf, bufsize); +} + +uptr internal_unlink(const char *path) { + return internal_syscall(__NR_unlink, path); } -int internal_sched_yield() { - return syscall(__NR_sched_yield); +uptr internal_sched_yield() { + return internal_syscall(__NR_sched_yield); +} + +void internal__exit(int exitcode) { + internal_syscall(__NR_exit_group, exitcode); + Die(); // Unreachable. +} + +uptr internal_execve(const char *filename, char *const argv[], + char *const envp[]) { + return internal_syscall(__NR_execve, filename, argv, envp); } // ----------------- sanitizer_common.h bool FileExists(const char *filename) { -#if SANITIZER_LINUX_USES_64BIT_SYSCALLS struct stat st; - if (syscall(__NR_stat, filename, &st)) + if (internal_stat(filename, &st)) return false; -#else - struct stat64 st; - if (syscall(__NR_stat64, filename, &st)) - return false; -#endif // Sanity check: filename is a regular file. return S_ISREG(st.st_mode); } uptr GetTid() { - return syscall(__NR_gettid); -} - -void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, - uptr *stack_bottom) { - static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M - CHECK(stack_top); - CHECK(stack_bottom); - if (at_initialization) { - // This is the main thread. Libpthread may not be initialized yet. - struct rlimit rl; - CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0); - - // Find the mapping that contains a stack variable. - MemoryMappingLayout proc_maps; - uptr start, end, offset; - uptr prev_end = 0; - while (proc_maps.Next(&start, &end, &offset, 0, 0)) { - if ((uptr)&rl < end) - break; - prev_end = end; - } - CHECK((uptr)&rl >= start && (uptr)&rl < end); - - // Get stacksize from rlimit, but clip it so that it does not overlap - // with other mappings. - uptr stacksize = rl.rlim_cur; - if (stacksize > end - prev_end) - stacksize = end - prev_end; - // When running with unlimited stack size, we still want to set some limit. - // The unlimited stack size is caused by 'ulimit -s unlimited'. - // Also, for some reason, GNU make spawns subprocesses with unlimited stack. - if (stacksize > kMaxThreadStackSize) - stacksize = kMaxThreadStackSize; - *stack_top = end; - *stack_bottom = end - stacksize; - return; - } - pthread_attr_t attr; - CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); - uptr stacksize = 0; - void *stackaddr = 0; - pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize); - pthread_attr_destroy(&attr); + return internal_syscall(__NR_gettid); +} - *stack_top = (uptr)stackaddr + stacksize; - *stack_bottom = (uptr)stackaddr; - CHECK(stacksize < kMaxThreadStackSize); // Sanity check. +u64 NanoTime() { + kernel_timeval tv; + internal_syscall(__NR_gettimeofday, &tv, 0); + return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000; } // Like getenv, but reads env directly from /proc and does not use libc. @@ -201,6 +248,11 @@ const char *GetEnv(const char *name) { return 0; // Not found. 
} +extern "C" { + extern void *__libc_stack_end SANITIZER_WEAK_ATTRIBUTE; +} + +#if !SANITIZER_GO static void ReadNullSepFileToArray(const char *path, char ***arr, int arr_size) { char *buff; @@ -219,13 +271,33 @@ static void ReadNullSepFileToArray(const char *path, char ***arr, } (*arr)[count] = 0; } +#endif + +static void GetArgsAndEnv(char*** argv, char*** envp) { +#if !SANITIZER_GO + if (&__libc_stack_end) { +#endif + uptr* stack_end = (uptr*)__libc_stack_end; + int argc = *stack_end; + *argv = (char**)(stack_end + 1); + *envp = (char**)(stack_end + argc + 2); +#if !SANITIZER_GO + } else { + static const int kMaxArgv = 2000, kMaxEnvp = 2000; + ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv); + ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp); + } +#endif +} void ReExec() { - static const int kMaxArgv = 100, kMaxEnvp = 1000; char **argv, **envp; - ReadNullSepFileToArray("/proc/self/cmdline", &argv, kMaxArgv); - ReadNullSepFileToArray("/proc/self/environ", &envp, kMaxEnvp); - execve(argv[0], argv, envp); + GetArgsAndEnv(&argv, &envp); + uptr rv = internal_execve("/proc/self/exe", argv, envp); + int rverrno; + CHECK_EQ(internal_iserror(rv, &rverrno), true); + Printf("execve failed, errno %d\n", rverrno); + Die(); } void PrepareForSandboxing() { @@ -234,6 +306,8 @@ void PrepareForSandboxing() { // process will be able to load additional libraries, so it's fine to use the // cached mappings. MemoryMappingLayout::CacheMemoryMappings(); + // Same for /proc/self/exe in the symbolizer. + SymbolizerPrepareForSandboxing(); } // ----------------- sanitizer_procmaps.h @@ -241,18 +315,22 @@ void PrepareForSandboxing() { ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_; StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized. -MemoryMappingLayout::MemoryMappingLayout() { +MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { proc_self_maps_.len = ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data, &proc_self_maps_.mmaped_size, 1 << 26); - if (proc_self_maps_.mmaped_size == 0) { - LoadFromCache(); - CHECK_GT(proc_self_maps_.len, 0); + if (cache_enabled) { + if (proc_self_maps_.mmaped_size == 0) { + LoadFromCache(); + CHECK_GT(proc_self_maps_.len, 0); + } + } else { + CHECK_GT(proc_self_maps_.mmaped_size, 0); } - // internal_write(2, proc_self_maps_.data, proc_self_maps_.len); Reset(); // FIXME: in the future we may want to cache the mappings on demand only. 
- CacheMemoryMappings(); + if (cache_enabled) + CacheMemoryMappings(); } MemoryMappingLayout::~MemoryMappingLayout() { @@ -314,7 +392,7 @@ static uptr ParseHex(char **str) { return x; } -static bool IsOnOf(char c, char c1, char c2) { +static bool IsOneOf(char c, char c1, char c2) { return c == c1 || c == c2; } @@ -323,7 +401,8 @@ static bool IsDecimal(char c) { } bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size) { + char filename[], uptr filename_size, + uptr *protection) { char *last = proc_self_maps_.data + proc_self_maps_.len; if (current_ >= last) return false; uptr dummy; @@ -338,10 +417,22 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, CHECK_EQ(*current_++, '-'); *end = ParseHex(¤t_); CHECK_EQ(*current_++, ' '); - CHECK(IsOnOf(*current_++, '-', 'r')); - CHECK(IsOnOf(*current_++, '-', 'w')); - CHECK(IsOnOf(*current_++, '-', 'x')); - CHECK(IsOnOf(*current_++, 's', 'p')); + uptr local_protection = 0; + CHECK(IsOneOf(*current_, '-', 'r')); + if (*current_++ == 'r') + local_protection |= kProtectionRead; + CHECK(IsOneOf(*current_, '-', 'w')); + if (*current_++ == 'w') + local_protection |= kProtectionWrite; + CHECK(IsOneOf(*current_, '-', 'x')); + if (*current_++ == 'x') + local_protection |= kProtectionExecute; + CHECK(IsOneOf(*current_, 's', 'p')); + if (*current_++ == 's') + local_protection |= kProtectionShared; + if (protection) { + *protection = local_protection; + } CHECK_EQ(*current_++, ' '); *offset = ParseHex(¤t_); CHECK_EQ(*current_++, ' '); @@ -371,87 +462,12 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, // Gets the object name and the offset by walking MemoryMappingLayout. bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset, char filename[], - uptr filename_size) { - return IterateForObjectNameAndOffset(addr, offset, filename, filename_size); + uptr filename_size, + uptr *protection) { + return IterateForObjectNameAndOffset(addr, offset, filename, filename_size, + protection); } -bool SanitizerSetThreadName(const char *name) { -#ifdef PR_SET_NAME - return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT -#else - return false; -#endif -} - -bool SanitizerGetThreadName(char *name, int max_len) { -#ifdef PR_GET_NAME - char buff[17]; - if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT - return false; - internal_strncpy(name, buff, max_len); - name[max_len] = 0; - return true; -#else - return false; -#endif -} - -#ifndef SANITIZER_GO -//------------------------- SlowUnwindStack ----------------------------------- -#ifdef __arm__ -#define UNWIND_STOP _URC_END_OF_STACK -#define UNWIND_CONTINUE _URC_NO_REASON -#else -#define UNWIND_STOP _URC_NORMAL_STOP -#define UNWIND_CONTINUE _URC_NO_REASON -#endif - -uptr Unwind_GetIP(struct _Unwind_Context *ctx) { -#ifdef __arm__ - uptr val; - _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE, - 15 /* r15 = PC */, _UVRSD_UINT32, &val); - CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed"); - // Clear the Thumb bit. 
- return val & ~(uptr)1; -#else - return _Unwind_GetIP(ctx); -#endif -} - -_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { - StackTrace *b = (StackTrace*)param; - CHECK(b->size < b->max_size); - uptr pc = Unwind_GetIP(ctx); - b->trace[b->size++] = pc; - if (b->size == b->max_size) return UNWIND_STOP; - return UNWIND_CONTINUE; -} - -static bool MatchPc(uptr cur_pc, uptr trace_pc) { - return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64; -} - -void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { - this->size = 0; - this->max_size = max_depth; - if (max_depth > 1) { - _Unwind_Backtrace(Unwind_Trace, this); - // We need to pop a few frames so that pc is on top. - // trace[0] belongs to the current function so we always pop it. - int to_pop = 1; - /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1; - else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2; - else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3; - else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4; - else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5; - this->PopStackFrames(to_pop); - } - this->trace[0] = pc; -} - -#endif // #ifndef SANITIZER_GO - enum MutexState { MtxUnlocked = 0, MtxLocked = 1, @@ -462,12 +478,16 @@ BlockingMutex::BlockingMutex(LinkerInitialized) { CHECK_EQ(owner_, 0); } +BlockingMutex::BlockingMutex() { + internal_memset(this, 0, sizeof(*this)); +} + void BlockingMutex::Lock() { atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked) return; while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) - syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0); + internal_syscall(__NR_futex, m, FUTEX_WAIT, MtxSleeping, 0, 0, 0); } void BlockingMutex::Unlock() { @@ -475,9 +495,147 @@ void BlockingMutex::Unlock() { u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed); CHECK_NE(v, MtxUnlocked); if (v == MtxSleeping) - syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0); + internal_syscall(__NR_futex, m, FUTEX_WAKE, 1, 0, 0, 0); +} + +void BlockingMutex::CheckLocked() { + atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); + CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed)); +} + +// ----------------- sanitizer_linux.h +// The actual size of this structure is specified by d_reclen. +// Note that getdents64 uses a different structure format. We only provide the +// 32-bit syscall here. +struct linux_dirent { + unsigned long d_ino; + unsigned long d_off; + unsigned short d_reclen; + char d_name[256]; +}; + +// Syscall wrappers. 
+uptr internal_ptrace(int request, int pid, void *addr, void *data) { + return internal_syscall(__NR_ptrace, request, pid, addr, data); +} + +uptr internal_waitpid(int pid, int *status, int options) { + return internal_syscall(__NR_wait4, pid, status, options, 0 /* rusage */); +} + +uptr internal_getpid() { + return internal_syscall(__NR_getpid); +} + +uptr internal_getppid() { + return internal_syscall(__NR_getppid); +} + +uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) { + return internal_syscall(__NR_getdents, fd, dirp, count); +} + +uptr internal_lseek(fd_t fd, OFF_T offset, int whence) { + return internal_syscall(__NR_lseek, fd, offset, whence); +} + +uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) { + return internal_syscall(__NR_prctl, option, arg2, arg3, arg4, arg5); +} + +uptr internal_sigaltstack(const struct sigaltstack *ss, + struct sigaltstack *oss) { + return internal_syscall(__NR_sigaltstack, ss, oss); +} + +// ThreadLister implementation. +ThreadLister::ThreadLister(int pid) + : pid_(pid), + descriptor_(-1), + buffer_(4096), + error_(true), + entry_((struct linux_dirent *)buffer_.data()), + bytes_read_(0) { + char task_directory_path[80]; + internal_snprintf(task_directory_path, sizeof(task_directory_path), + "/proc/%d/task/", pid); + uptr openrv = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY); + if (internal_iserror(openrv)) { + error_ = true; + Report("Can't open /proc/%d/task for reading.\n", pid); + } else { + error_ = false; + descriptor_ = openrv; + } +} + +int ThreadLister::GetNextTID() { + int tid = -1; + do { + if (error_) + return -1; + if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries()) + return -1; + if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' && + entry_->d_name[0] <= '9') { + // Found a valid tid. + tid = (int)internal_atoll(entry_->d_name); + } + entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen); + } while (tid < 0); + return tid; +} + +void ThreadLister::Reset() { + if (error_ || descriptor_ < 0) + return; + internal_lseek(descriptor_, 0, SEEK_SET); +} + +ThreadLister::~ThreadLister() { + if (descriptor_ >= 0) + internal_close(descriptor_); +} + +bool ThreadLister::error() { return error_; } + +bool ThreadLister::GetDirectoryEntries() { + CHECK_GE(descriptor_, 0); + CHECK_NE(error_, true); + bytes_read_ = internal_getdents(descriptor_, + (struct linux_dirent *)buffer_.data(), + buffer_.size()); + if (internal_iserror(bytes_read_)) { + Report("Can't read directory entries from /proc/%d/task.\n", pid_); + error_ = true; + return false; + } else if (bytes_read_ == 0) { + return false; + } + entry_ = (struct linux_dirent *)buffer_.data(); + return true; +} + +uptr GetPageSize() { +#if defined(__x86_64__) || defined(__i386__) + return EXEC_PAGESIZE; +#else + return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy. +#endif +} + +// Match full names of the form /path/to/base_name{-,.}* +bool LibraryNameIs(const char *full_name, const char *base_name) { + const char *name = full_name; + // Strip path. 
+ while (*name != '\0') name++; + while (name > full_name && *name != '/') name--; + if (*name == '/') name++; + uptr base_name_length = internal_strlen(base_name); + if (internal_strncmp(name, base_name, base_name_length)) return false; + return (name[base_name_length] == '-' || name[base_name_length] == '.'); } } // namespace __sanitizer -#endif // __linux__ +#endif // SANITIZER_LINUX diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h new file mode 100644 index 000000000000..ba68e6c2dd5a --- /dev/null +++ b/lib/sanitizer_common/sanitizer_linux.h @@ -0,0 +1,65 @@ +//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Linux-specific syscall wrappers and classes. +// +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_LINUX_H +#define SANITIZER_LINUX_H + +#include "sanitizer_common.h" +#include "sanitizer_internal_defs.h" + +struct sigaltstack; + +namespace __sanitizer { +// Dirent structure for getdents(). Note that this structure is different from +// the one in <dirent.h>, which is used by readdir(). +struct linux_dirent; + +// Syscall wrappers. +uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count); +uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5); +uptr internal_sigaltstack(const struct sigaltstack* ss, + struct sigaltstack* oss); + +// This class reads thread IDs from /proc/<pid>/task using only syscalls. +class ThreadLister { + public: + explicit ThreadLister(int pid); + ~ThreadLister(); + // GetNextTID returns -1 if the list of threads is exhausted, or if there has + // been an error. + int GetNextTID(); + void Reset(); + bool error(); + + private: + bool GetDirectoryEntries(); + + int pid_; + int descriptor_; + InternalScopedBuffer<char> buffer_; + bool error_; + struct linux_dirent* entry_; + int bytes_read_; +}; + +void AdjustStackSizeLinux(void *attr, int verbosity); + +// Exposed for testing. +uptr ThreadDescriptorSize(); + +// Matches a library's file name against a base name (stripping path and version +// information). +bool LibraryNameIs(const char *full_name, const char *base_name); + +} // namespace __sanitizer + +#endif // SANITIZER_LINUX_H diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc new file mode 100644 index 000000000000..d9e2f5389606 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc @@ -0,0 +1,273 @@ +//===-- sanitizer_linux_libcdep.cc ----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries and implements linux-specific functions from +// sanitizer_libc.h. 
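A short usage sketch for the ThreadLister declared above (hypothetical caller, not part of this hunk; it relies only on the interface shown in sanitizer_linux.h):

ThreadLister lister(internal_getpid());
if (!lister.error()) {
  for (int tid = lister.GetNextTID(); tid != -1; tid = lister.GetNextTID())
    Report("runtime sees thread %d\n", tid);
  lister.Reset();  // rewind so the same /proc/<pid>/task can be enumerated again
}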
+//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_LINUX + +#include "sanitizer_common.h" +#include "sanitizer_procmaps.h" +#include "sanitizer_stacktrace.h" + +#ifdef __x86_64__ +#include <asm/prctl.h> +#endif +#include <dlfcn.h> +#include <pthread.h> +#include <sys/prctl.h> +#include <sys/resource.h> +#include <unwind.h> + +#ifdef __x86_64__ +extern "C" int arch_prctl(int code, __sanitizer::uptr *addr); +#endif + +namespace __sanitizer { + +void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, + uptr *stack_bottom) { + static const uptr kMaxThreadStackSize = 256 * (1 << 20); // 256M + CHECK(stack_top); + CHECK(stack_bottom); + if (at_initialization) { + // This is the main thread. Libpthread may not be initialized yet. + struct rlimit rl; + CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0); + + // Find the mapping that contains a stack variable. + MemoryMappingLayout proc_maps(/*cache_enabled*/true); + uptr start, end, offset; + uptr prev_end = 0; + while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) { + if ((uptr)&rl < end) + break; + prev_end = end; + } + CHECK((uptr)&rl >= start && (uptr)&rl < end); + + // Get stacksize from rlimit, but clip it so that it does not overlap + // with other mappings. + uptr stacksize = rl.rlim_cur; + if (stacksize > end - prev_end) + stacksize = end - prev_end; + // When running with unlimited stack size, we still want to set some limit. + // The unlimited stack size is caused by 'ulimit -s unlimited'. + // Also, for some reason, GNU make spawns subprocesses with unlimited stack. + if (stacksize > kMaxThreadStackSize) + stacksize = kMaxThreadStackSize; + *stack_top = end; + *stack_bottom = end - stacksize; + return; + } + pthread_attr_t attr; + CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); + uptr stacksize = 0; + void *stackaddr = 0; + pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize); + pthread_attr_destroy(&attr); + + *stack_top = (uptr)stackaddr + stacksize; + *stack_bottom = (uptr)stackaddr; + CHECK(stacksize < kMaxThreadStackSize); // Sanity check. 
+} + +// Does not compile for Go because dlsym() requires -ldl +#ifndef SANITIZER_GO +bool SetEnv(const char *name, const char *value) { + void *f = dlsym(RTLD_NEXT, "setenv"); + if (f == 0) + return false; + typedef int(*setenv_ft)(const char *name, const char *value, int overwrite); + setenv_ft setenv_f; + CHECK_EQ(sizeof(setenv_f), sizeof(f)); + internal_memcpy(&setenv_f, &f, sizeof(f)); + return setenv_f(name, value, 1) == 0; +} +#endif + +bool SanitizerSetThreadName(const char *name) { +#ifdef PR_SET_NAME + return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT +#else + return false; +#endif +} + +bool SanitizerGetThreadName(char *name, int max_len) { +#ifdef PR_GET_NAME + char buff[17]; + if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT + return false; + internal_strncpy(name, buff, max_len); + name[max_len] = 0; + return true; +#else + return false; +#endif +} + +#ifndef SANITIZER_GO +//------------------------- SlowUnwindStack ----------------------------------- +#ifdef __arm__ +#define UNWIND_STOP _URC_END_OF_STACK +#define UNWIND_CONTINUE _URC_NO_REASON +#else +#define UNWIND_STOP _URC_NORMAL_STOP +#define UNWIND_CONTINUE _URC_NO_REASON +#endif + +uptr Unwind_GetIP(struct _Unwind_Context *ctx) { +#ifdef __arm__ + uptr val; + _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE, + 15 /* r15 = PC */, _UVRSD_UINT32, &val); + CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed"); + // Clear the Thumb bit. + return val & ~(uptr)1; +#else + return _Unwind_GetIP(ctx); +#endif +} + +_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { + StackTrace *b = (StackTrace*)param; + CHECK(b->size < b->max_size); + uptr pc = Unwind_GetIP(ctx); + b->trace[b->size++] = pc; + if (b->size == b->max_size) return UNWIND_STOP; + return UNWIND_CONTINUE; +} + +static bool MatchPc(uptr cur_pc, uptr trace_pc) { + return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64; +} + +void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { + this->size = 0; + this->max_size = max_depth; + if (max_depth > 1) { + _Unwind_Backtrace(Unwind_Trace, this); + // We need to pop a few frames so that pc is on top. + // trace[0] belongs to the current function so we always pop it. + int to_pop = 1; + /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1; + else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2; + else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3; + else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4; + else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5; + this->PopStackFrames(to_pop); + } + this->trace[0] = pc; +} + +#endif // !SANITIZER_GO + +static uptr g_tls_size; + +#ifdef __i386__ +# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall)) +#else +# define DL_INTERNAL_FUNCTION +#endif + +void InitTlsSize() { +#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID + typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION; + get_tls_func get_tls; + void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); + CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr)); + internal_memcpy(&get_tls, &get_tls_static_info_ptr, + sizeof(get_tls_static_info_ptr)); + CHECK_NE(get_tls, 0); + size_t tls_size = 0; + size_t tls_align = 0; + get_tls(&tls_size, &tls_align); + g_tls_size = tls_size; +#endif +} + +uptr GetTlsSize() { + return g_tls_size; +} + +// sizeof(struct thread) from glibc. 
+#ifdef __x86_64__ +const uptr kThreadDescriptorSize = 2304; + +uptr ThreadDescriptorSize() { + return kThreadDescriptorSize; +} +#endif + +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { +#ifndef SANITIZER_GO +#ifdef __x86_64__ + arch_prctl(ARCH_GET_FS, tls_addr); + *tls_size = GetTlsSize(); + *tls_addr -= *tls_size; + *tls_addr += kThreadDescriptorSize; +#else + *tls_addr = 0; + *tls_size = 0; +#endif + + uptr stack_top, stack_bottom; + GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); + *stk_addr = stack_bottom; + *stk_size = stack_top - stack_bottom; + + if (!main) { + // If stack and tls intersect, make them non-intersecting. + if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) { + CHECK_GT(*tls_addr + *tls_size, *stk_addr); + CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size); + *stk_size -= *tls_size; + *tls_addr = *stk_addr + *stk_size; + } + } +#else // SANITIZER_GO + *stk_addr = 0; + *stk_size = 0; + *tls_addr = 0; + *tls_size = 0; +#endif // SANITIZER_GO +} + +void AdjustStackSizeLinux(void *attr_, int verbosity) { + pthread_attr_t *attr = (pthread_attr_t *)attr_; + uptr stackaddr = 0; + size_t stacksize = 0; + pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize); + // GLibC will return (0 - stacksize) as the stack address in the case when + // stacksize is set, but stackaddr is not. + bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0); + // We place a lot of tool data into TLS, account for that. + const uptr minstacksize = GetTlsSize() + 128*1024; + if (stacksize < minstacksize) { + if (!stack_set) { + if (verbosity && stacksize != 0) + Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize, + minstacksize); + pthread_attr_setstacksize(attr, minstacksize); + } else { + Printf("Sanitizer: pre-allocated stack size is insufficient: " + "%zu < %zu\n", stacksize, minstacksize); + Printf("Sanitizer: pthread_create is likely to fail.\n"); + } + } +} + +} // namespace __sanitizer + +#endif // SANITIZER_LINUX diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc index c4b8e4c2bcf2..f97d1e39c5bf 100644 --- a/lib/sanitizer_common/sanitizer_mac.cc +++ b/lib/sanitizer_common/sanitizer_mac.cc @@ -12,7 +12,15 @@ // sanitizer_libc.h. //===----------------------------------------------------------------------===// -#ifdef __APPLE__ +#include "sanitizer_platform.h" +#if SANITIZER_MAC + +// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so +// the clients will most certainly use 64-bit ones as well. 
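For reference, a hedged sketch of how the new GetThreadStackAndTls() above is meant to be consumed (hypothetical caller; the real call sites live in the tool-specific thread code, which is not part of this hunk):

uptr stk_addr = 0, stk_size = 0, tls_addr = 0, tls_size = 0;
GetThreadStackAndTls(/*main=*/false, &stk_addr, &stk_size, &tls_addr, &tls_size);
// [stk_addr, stk_addr + stk_size) is the thread's stack and
// [tls_addr, tls_addr + tls_size) its static TLS block; on x86_64 the two
// ranges are made non-overlapping by carving the TLS out of the stack.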
+#ifndef _DARWIN_USE_64_BIT_INODE +#define _DARWIN_USE_64_BIT_INODE 1 +#endif +#include <stdio.h> #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" @@ -31,26 +39,37 @@ #include <sys/types.h> #include <unistd.h> #include <libkern/OSAtomic.h> +#include <errno.h> namespace __sanitizer { +#include "sanitizer_syscall_generic.inc" + // ---------------------- sanitizer_libc.h -void *internal_mmap(void *addr, size_t length, int prot, int flags, - int fd, u64 offset) { - return mmap(addr, length, prot, flags, fd, offset); +uptr internal_mmap(void *addr, size_t length, int prot, int flags, + int fd, u64 offset) { + return (uptr)mmap(addr, length, prot, flags, fd, offset); } -int internal_munmap(void *addr, uptr length) { +uptr internal_munmap(void *addr, uptr length) { return munmap(addr, length); } -int internal_close(fd_t fd) { +uptr internal_close(fd_t fd) { return close(fd); } -fd_t internal_open(const char *filename, bool write) { - return open(filename, - write ? O_WRONLY | O_CREAT : O_RDONLY, 0660); +uptr internal_open(const char *filename, int flags) { + return open(filename, flags); +} + +uptr internal_open(const char *filename, int flags, u32 mode) { + return open(filename, flags, mode); +} + +uptr OpenFile(const char *filename, bool write) { + return internal_open(filename, + write ? O_WRONLY | O_CREAT : O_RDONLY, 0660); } uptr internal_read(fd_t fd, void *buf, uptr count) { @@ -61,14 +80,26 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) { return write(fd, buf, count); } +uptr internal_stat(const char *path, void *buf) { + return stat(path, (struct stat *)buf); +} + +uptr internal_lstat(const char *path, void *buf) { + return lstat(path, (struct stat *)buf); +} + +uptr internal_fstat(fd_t fd, void *buf) { + return fstat(fd, (struct stat *)buf); +} + uptr internal_filesize(fd_t fd) { struct stat st; - if (fstat(fd, &st)) + if (internal_fstat(fd, &st)) return -1; return (uptr)st.st_size; } -int internal_dup2(int oldfd, int newfd) { +uptr internal_dup2(int oldfd, int newfd) { return dup2(oldfd, newfd); } @@ -76,10 +107,18 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) { return readlink(path, buf, bufsize); } -int internal_sched_yield() { +uptr internal_sched_yield() { return sched_yield(); } +void internal__exit(int exitcode) { + _exit(exitcode); +} + +uptr internal_getpid() { + return getpid(); +} + // ----------------- sanitizer_common.h bool FileExists(const char *filename) { struct stat st; @@ -131,9 +170,13 @@ void PrepareForSandboxing() { // Nothing here for now. 
} +uptr GetPageSize() { + return sysconf(_SC_PAGESIZE); +} + // ----------------- sanitizer_procmaps.h -MemoryMappingLayout::MemoryMappingLayout() { +MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { Reset(); } @@ -186,7 +229,9 @@ void MemoryMappingLayout::LoadFromCache() { template<u32 kLCSegment, typename SegmentCommand> bool MemoryMappingLayout::NextSegmentLoad( uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size) { + char filename[], uptr filename_size, uptr *protection) { + if (protection) + UNIMPLEMENTED(); const char* lc = current_load_cmd_addr_; current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize; if (((const load_command *)lc)->cmd == kLCSegment) { @@ -211,7 +256,8 @@ bool MemoryMappingLayout::NextSegmentLoad( } bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size) { + char filename[], uptr filename_size, + uptr *protection) { for (; current_image_ >= 0; current_image_--) { const mach_header* hdr = _dyld_get_image_header(current_image_); if (!hdr) continue; @@ -243,14 +289,14 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, #ifdef MH_MAGIC_64 case MH_MAGIC_64: { if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>( - start, end, offset, filename, filename_size)) + start, end, offset, filename, filename_size, protection)) return true; break; } #endif case MH_MAGIC: { if (NextSegmentLoad<LC_SEGMENT, struct segment_command>( - start, end, offset, filename, filename_size)) + start, end, offset, filename, filename_size, protection)) return true; break; } @@ -264,18 +310,24 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset, char filename[], - uptr filename_size) { - return IterateForObjectNameAndOffset(addr, offset, filename, filename_size); + uptr filename_size, + uptr *protection) { + return IterateForObjectNameAndOffset(addr, offset, filename, filename_size, + protection); } BlockingMutex::BlockingMutex(LinkerInitialized) { // We assume that OS_SPINLOCK_INIT is zero } +BlockingMutex::BlockingMutex() { + internal_memset(this, 0, sizeof(*this)); +} + void BlockingMutex::Lock() { CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_)); - CHECK(OS_SPINLOCK_INIT == 0); - CHECK(owner_ != (uptr)pthread_self()); + CHECK_EQ(OS_SPINLOCK_INIT, 0); + CHECK_NE(owner_, (uptr)pthread_self()); OSSpinLockLock((OSSpinLock*)&opaque_storage_); CHECK(!owner_); owner_ = (uptr)pthread_self(); @@ -287,6 +339,27 @@ void BlockingMutex::Unlock() { OSSpinLockUnlock((OSSpinLock*)&opaque_storage_); } +void BlockingMutex::CheckLocked() { + CHECK_EQ((uptr)pthread_self(), owner_); +} + +uptr GetTlsSize() { + return 0; +} + +void InitTlsSize() { +} + +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { + uptr stack_top, stack_bottom; + GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); + *stk_addr = stack_bottom; + *stk_size = stack_top - stack_bottom; + *tls_addr = 0; + *tls_size = 0; +} + } // namespace __sanitizer -#endif // __APPLE__ +#endif // SANITIZER_MAC diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h index 56438fce471c..469981c35176 100644 --- a/lib/sanitizer_common/sanitizer_mutex.h +++ b/lib/sanitizer_common/sanitizer_mutex.h @@ -70,8 +70,10 @@ class SpinMutex : public StaticSpinMutex { class BlockingMutex { public: explicit BlockingMutex(LinkerInitialized); + BlockingMutex(); 
void Lock(); void Unlock(); + void CheckLocked(); private: uptr opaque_storage_[10]; uptr owner_; // for debugging diff --git a/lib/sanitizer_common/sanitizer_placement_new.h b/lib/sanitizer_common/sanitizer_placement_new.h index c0b85e1c1717..a42301aedeac 100644 --- a/lib/sanitizer_common/sanitizer_placement_new.h +++ b/lib/sanitizer_common/sanitizer_placement_new.h @@ -19,7 +19,7 @@ #include "sanitizer_internal_defs.h" namespace __sanitizer { -#if (SANITIZER_WORDSIZE == 64) || defined(__APPLE__) +#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC typedef uptr operator_new_ptr_type; #else typedef u32 operator_new_ptr_type; diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h new file mode 100644 index 000000000000..acb997180957 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_platform.h @@ -0,0 +1,46 @@ +//===-- sanitizer_platform.h ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Common platform macros. +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_PLATFORM_H +#define SANITIZER_PLATFORM_H + +#if !defined(__linux__) && !defined(__APPLE__) && !defined(_WIN32) +# error "This operating system is not supported" +#endif + +#if defined(__linux__) +# define SANITIZER_LINUX 1 +#else +# define SANITIZER_LINUX 0 +#endif + +#if defined(__APPLE__) +# define SANITIZER_MAC 1 +#else +# define SANITIZER_MAC 0 +#endif + +#if defined(_WIN32) +# define SANITIZER_WINDOWS 1 +#else +# define SANITIZER_WINDOWS 0 +#endif + +#if defined(__ANDROID__) || defined(ANDROID) +# define SANITIZER_ANDROID 1 +#else +# define SANITIZER_ANDROID 0 +#endif + +#define SANITIZER_POSIX (SANITIZER_LINUX || SANITIZER_MAC) + +#endif // SANITIZER_PLATFORM_H diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h index abd41fe8c997..60c7145b611a 100644 --- a/lib/sanitizer_common/sanitizer_platform_interceptors.h +++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h @@ -11,21 +11,38 @@ // given library functions on a given platform. 
// //===----------------------------------------------------------------------===// +#ifndef SANITIZER_PLATFORM_INTERCEPTORS_H +#define SANITIZER_PLATFORM_INTERCEPTORS_H #include "sanitizer_internal_defs.h" -#if !defined(_WIN32) +#if !SANITIZER_WINDOWS # define SI_NOT_WINDOWS 1 +# include "sanitizer_platform_limits_posix.h" #else # define SI_NOT_WINDOWS 0 #endif -#if defined(__linux__) && !defined(ANDROID) +#if SANITIZER_LINUX && !SANITIZER_ANDROID # define SI_LINUX_NOT_ANDROID 1 #else # define SI_LINUX_NOT_ANDROID 0 #endif +#if SANITIZER_LINUX +# define SI_LINUX 1 +#else +# define SI_LINUX 0 +#endif + +#if SANITIZER_MAC +# define SI_MAC 1 +#else +# define SI_MAC 0 +#endif + +# define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS + # define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS # define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS @@ -33,6 +50,30 @@ # define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID # define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID -# define SANITIZER_INTERCEPT_PRCTL SI_LINUX_NOT_ANDROID +# define SANITIZER_INTERCEPT_PRCTL SI_LINUX + +# define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS + +# define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX + +# define SANITIZER_INTERCEPT_FREXP 1 +# define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS + +# define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \ + SI_MAC || SI_LINUX_NOT_ANDROID +# define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX +# define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID +# define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX +# define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS -# define SANITIZER_INTERCEPT_SCANF 1 +#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc new file mode 100644 index 000000000000..c269de65ca2c --- /dev/null +++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc @@ -0,0 +1,155 @@ +//===-- sanitizer_platform_limits_posix.cc --------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of Sanitizer common code. +// +// Sizes and layouts of platform-specific POSIX data structures. 
+//===----------------------------------------------------------------------===// + + +#include "sanitizer_platform.h" +#if SANITIZER_LINUX || SANITIZER_MAC + +#include "sanitizer_internal_defs.h" +#include "sanitizer_platform_limits_posix.h" + +#include <arpa/inet.h> +#include <dirent.h> +#include <grp.h> +#include <pthread.h> +#include <pwd.h> +#include <signal.h> +#include <stddef.h> +#include <sys/utsname.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/socket.h> +#include <sys/time.h> +#include <sys/resource.h> +#include <sys/socket.h> +#include <netdb.h> +#include <time.h> + +#if !SANITIZER_ANDROID +#include <sys/ucontext.h> +#endif // !SANITIZER_ANDROID + +#if SANITIZER_LINUX +#include <link.h> +#include <sys/vfs.h> +#include <sys/epoll.h> +#endif // SANITIZER_LINUX + +namespace __sanitizer { + unsigned struct_utsname_sz = sizeof(struct utsname); + unsigned struct_stat_sz = sizeof(struct stat); + unsigned struct_stat64_sz = sizeof(struct stat64); + unsigned struct_rusage_sz = sizeof(struct rusage); + unsigned struct_tm_sz = sizeof(struct tm); + unsigned struct_passwd_sz = sizeof(struct passwd); + unsigned struct_group_sz = sizeof(struct group); + unsigned siginfo_t_sz = sizeof(siginfo_t); + unsigned struct_sigaction_sz = sizeof(struct sigaction); + unsigned struct_itimerval_sz = sizeof(struct itimerval); + unsigned pthread_t_sz = sizeof(pthread_t); + unsigned struct_sockaddr_sz = sizeof(struct sockaddr); + +#if !SANITIZER_ANDROID + unsigned ucontext_t_sz = sizeof(ucontext_t); +#endif // !SANITIZER_ANDROID + +#if SANITIZER_LINUX + unsigned struct_rlimit_sz = sizeof(struct rlimit); + unsigned struct_dirent_sz = sizeof(struct dirent); + unsigned struct_statfs_sz = sizeof(struct statfs); + unsigned struct_epoll_event_sz = sizeof(struct epoll_event); + unsigned struct_timespec_sz = sizeof(struct timespec); +#endif // SANITIZER_LINUX + +#if SANITIZER_LINUX && !SANITIZER_ANDROID + unsigned struct_dirent64_sz = sizeof(struct dirent64); + unsigned struct_rlimit64_sz = sizeof(struct rlimit64); + unsigned struct_statfs64_sz = sizeof(struct statfs64); +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID + + uptr sig_ign = (uptr)SIG_IGN; + uptr sig_dfl = (uptr)SIG_DFL; + + void* __sanitizer_get_msghdr_iov_iov_base(void* msg, int idx) { + return ((struct msghdr *)msg)->msg_iov[idx].iov_base; + } + + uptr __sanitizer_get_msghdr_iov_iov_len(void* msg, int idx) { + return ((struct msghdr *)msg)->msg_iov[idx].iov_len; + } + + uptr __sanitizer_get_msghdr_iovlen(void* msg) { + return ((struct msghdr *)msg)->msg_iovlen; + } + + uptr __sanitizer_get_socklen_t(void* socklen_ptr) { + return *(socklen_t*)socklen_ptr; + } + + uptr __sanitizer_get_sigaction_sa_sigaction(void *act) { + struct sigaction *a = (struct sigaction *)act; + // Check that sa_sigaction and sa_handler are the same. 
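// On glibc (and most POSIX implementations) the two handler fields of
// struct sigaction share storage through a union, which is what lets the
// accessors below read sa_sigaction regardless of which member was written.
// A standalone restatement of the invariant that the CHECK below relies on
// (illustrative sketch only):

#include <signal.h>
#include <cassert>

int main() {
  struct sigaction sa;
  // sa_handler and sa_sigaction are two views of the same union member, so
  // their addresses coincide and either one can be read back as an integer.
  assert((void *)&sa.sa_handler == (void *)&sa.sa_sigaction);
  return 0;
}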
+ CHECK((void *)&(a->sa_sigaction) == (void *)&(a->sa_handler)); + return (uptr) a->sa_sigaction; + } + void __sanitizer_set_sigaction_sa_sigaction(void *act, uptr cb) { + struct sigaction *a = (struct sigaction *)act; + a->sa_sigaction = (void (*)(int, siginfo_t *, void *))cb; + } + bool __sanitizer_get_sigaction_sa_siginfo(void *act) { + struct sigaction *a = (struct sigaction *)act; + return a->sa_flags & SA_SIGINFO; + } + + uptr __sanitizer_in_addr_sz(int af) { + if (af == AF_INET) + return sizeof(struct in_addr); + else if (af == AF_INET6) + return sizeof(struct in6_addr); + else + return 0; + } +} // namespace __sanitizer + +COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t)); +COMPILER_CHECK(sizeof(__sanitizer::struct_sigaction_max_sz) >= + sizeof(__sanitizer::struct_sigaction_sz)); +#if SANITIZER_LINUX +COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_addr) == + offsetof(struct dl_phdr_info, dlpi_addr)); +COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_name) == + offsetof(struct dl_phdr_info, dlpi_name)); +COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_phdr) == + offsetof(struct dl_phdr_info, dlpi_phdr)); +COMPILER_CHECK(offsetof(struct __sanitizer_dl_phdr_info, dlpi_phnum) == + offsetof(struct dl_phdr_info, dlpi_phnum)); +#endif + +COMPILER_CHECK(sizeof(struct __sanitizer_addrinfo) == sizeof(struct addrinfo)); +COMPILER_CHECK(offsetof(struct __sanitizer_addrinfo, ai_addr) == + offsetof(struct addrinfo, ai_addr)); +COMPILER_CHECK(offsetof(struct __sanitizer_addrinfo, ai_canonname) == + offsetof(struct addrinfo, ai_canonname)); +COMPILER_CHECK(offsetof(struct __sanitizer_addrinfo, ai_next) == + offsetof(struct addrinfo, ai_next)); + +COMPILER_CHECK(sizeof(struct __sanitizer_hostent) == sizeof(struct hostent)); +COMPILER_CHECK(offsetof(struct __sanitizer_hostent, h_name) == + offsetof(struct hostent, h_name)); +COMPILER_CHECK(offsetof(struct __sanitizer_hostent, h_aliases) == + offsetof(struct hostent, h_aliases)); +COMPILER_CHECK(offsetof(struct __sanitizer_hostent, h_addr_list) == + offsetof(struct hostent, h_addr_list)); + +#endif // SANITIZER_LINUX || SANITIZER_MAC diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/lib/sanitizer_common/sanitizer_platform_limits_posix.h new file mode 100644 index 000000000000..37581953db24 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.h @@ -0,0 +1,115 @@ +//===-- sanitizer_platform_limits_posix.h ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of Sanitizer common code. +// +// Sizes and layouts of platform-specific POSIX data structures. 
+//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H +#define SANITIZER_PLATFORM_LIMITS_POSIX_H + +#include "sanitizer_platform.h" + +namespace __sanitizer { + extern unsigned struct_utsname_sz; + extern unsigned struct_stat_sz; + extern unsigned struct_stat64_sz; + extern unsigned struct_rusage_sz; + extern unsigned struct_tm_sz; + extern unsigned struct_passwd_sz; + extern unsigned struct_group_sz; + extern unsigned struct_sigaction_sz; + extern unsigned siginfo_t_sz; + extern unsigned struct_itimerval_sz; + extern unsigned pthread_t_sz; + extern unsigned struct_sockaddr_sz; + +#if !SANITIZER_ANDROID + extern unsigned ucontext_t_sz; +#endif // !SANITIZER_ANDROID + +#if SANITIZER_LINUX + extern unsigned struct_rlimit_sz; + extern unsigned struct_dirent_sz; + extern unsigned struct_statfs_sz; + extern unsigned struct_epoll_event_sz; + extern unsigned struct_timespec_sz; +#endif // SANITIZER_LINUX + +#if SANITIZER_LINUX && !SANITIZER_ANDROID + extern unsigned struct_dirent64_sz; + extern unsigned struct_rlimit64_sz; + extern unsigned struct_statfs64_sz; +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID + + void* __sanitizer_get_msghdr_iov_iov_base(void* msg, int idx); + uptr __sanitizer_get_msghdr_iov_iov_len(void* msg, int idx); + uptr __sanitizer_get_msghdr_iovlen(void* msg); + uptr __sanitizer_get_socklen_t(void* socklen_ptr); + + // This thing depends on the platform. We are only interested in the upper + // limit. Verified with a compiler assert in .cc. + const int pthread_attr_t_max_sz = 128; + union __sanitizer_pthread_attr_t { + char size[pthread_attr_t_max_sz]; // NOLINT + void *align; + }; + + uptr __sanitizer_get_sigaction_sa_sigaction(void *act); + void __sanitizer_set_sigaction_sa_sigaction(void *act, uptr cb); + bool __sanitizer_get_sigaction_sa_siginfo(void *act); + + const unsigned struct_sigaction_max_sz = 256; + union __sanitizer_sigaction { + char size[struct_sigaction_max_sz]; // NOLINT + }; + + extern uptr sig_ign; + extern uptr sig_dfl; + + uptr __sanitizer_in_addr_sz(int af); + +#if SANITIZER_LINUX + struct __sanitizer_dl_phdr_info { + uptr dlpi_addr; + const char *dlpi_name; + const void *dlpi_phdr; + short dlpi_phnum; + }; +#endif + + struct __sanitizer_addrinfo { + int ai_flags; + int ai_family; + int ai_socktype; + int ai_protocol; +#if SANITIZER_ANDROID || SANITIZER_MAC + unsigned ai_addrlen; + char *ai_canonname; + void *ai_addr; +#else // LINUX + uptr ai_addrlen; + void *ai_addr; + char *ai_canonname; +#endif + struct __sanitizer_addrinfo *ai_next; + }; + + struct __sanitizer_hostent { + char *h_name; + char **h_aliases; + int h_addrtype; + int h_length; + char **h_addr_list; + }; + +} // namespace __sanitizer + +#endif diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc index 32657838600d..af25b245fdd3 100644 --- a/lib/sanitizer_common/sanitizer_posix.cc +++ b/lib/sanitizer_common/sanitizer_posix.cc @@ -11,123 +11,106 @@ // run-time libraries and implements POSIX-specific functions from // sanitizer_libc.h. 
//===----------------------------------------------------------------------===// -#if defined(__linux__) || defined(__APPLE__) + +#include "sanitizer_platform.h" +#if SANITIZER_LINUX || SANITIZER_MAC #include "sanitizer_common.h" #include "sanitizer_libc.h" #include "sanitizer_procmaps.h" +#include "sanitizer_stacktrace.h" -#include <errno.h> -#include <pthread.h> -#include <stdarg.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> #include <sys/mman.h> -#include <sys/resource.h> -#include <sys/time.h> -#include <sys/types.h> -#include <unistd.h> namespace __sanitizer { // ------------- sanitizer_common.h -uptr GetPageSize() { - return sysconf(_SC_PAGESIZE); -} - uptr GetMmapGranularity() { return GetPageSize(); } -int GetPid() { - return getpid(); -} - -uptr GetThreadSelf() { - return (uptr)pthread_self(); -} - void *MmapOrDie(uptr size, const char *mem_type) { size = RoundUpTo(size, GetPageSizeCached()); - void *res = internal_mmap(0, size, + uptr res = internal_mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); - if (res == (void*)-1) { + int reserrno; + if (internal_iserror(res, &reserrno)) { static int recursion_count; if (recursion_count) { // The Report() and CHECK calls below may call mmap recursively and fail. // If we went into recursion, just die. - RawWrite("AddressSanitizer is unable to mmap\n"); + RawWrite("ERROR: Failed to mmap\n"); Die(); } recursion_count++; - Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s: %s\n", - size, size, mem_type, strerror(errno)); + Report("ERROR: %s failed to allocate 0x%zx (%zd) bytes of %s: %d\n", + SanitizerToolName, size, size, mem_type, reserrno); DumpProcessMap(); CHECK("unable to mmap" && 0); } - return res; + return (void *)res; } void UnmapOrDie(void *addr, uptr size) { if (!addr || !size) return; - int res = internal_munmap(addr, size); - if (res != 0) { - Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n", - size, size, addr); + uptr res = internal_munmap(addr, size); + if (internal_iserror(res)) { + Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n", + SanitizerToolName, size, size, addr); CHECK("unable to unmap" && 0); } } void *MmapFixedNoReserve(uptr fixed_addr, uptr size) { uptr PageSize = GetPageSizeCached(); - void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), + uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0); - if (p == (void*)-1) - Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n", - size, size, fixed_addr, errno); - return p; + int reserrno; + if (internal_iserror(p, &reserrno)) + Report("ERROR: " + "%s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n", + SanitizerToolName, size, size, fixed_addr, reserrno); + return (void *)p; } void *MmapFixedOrDie(uptr fixed_addr, uptr size) { uptr PageSize = GetPageSizeCached(); - void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), + uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0); - if (p == (void*)-1) { - Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n", - size, size, fixed_addr, errno); + int reserrno; + if (internal_iserror(p, &reserrno)) { + Report("ERROR:" + " %s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n", + SanitizerToolName, size, size, fixed_addr, reserrno); CHECK("unable 
to mmap" && 0); } - return p; + return (void *)p; } void *Mprotect(uptr fixed_addr, uptr size) { - return internal_mmap((void*)fixed_addr, size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, - -1, 0); -} - -void FlushUnneededShadowMemory(uptr addr, uptr size) { - madvise((void*)addr, size, MADV_DONTNEED); + return (void *)internal_mmap((void*)fixed_addr, size, + PROT_NONE, + MAP_PRIVATE | MAP_ANON | MAP_FIXED | + MAP_NORESERVE, -1, 0); } void *MapFileToMemory(const char *file_name, uptr *buff_size) { - fd_t fd = internal_open(file_name, false); - CHECK_NE(fd, kInvalidFd); + uptr openrv = OpenFile(file_name, false); + CHECK(!internal_iserror(openrv)); + fd_t fd = openrv; uptr fsize = internal_filesize(fd); CHECK_NE(fsize, (uptr)-1); CHECK_GT(fsize, 0); *buff_size = RoundUpTo(fsize, GetPageSizeCached()); - void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0); - return (map == MAP_FAILED) ? 0 : map; + uptr map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0); + return internal_iserror(map) ? 0 : (void *)map; } @@ -143,10 +126,11 @@ static inline bool IntervalsAreSeparate(uptr start1, uptr end1, // several worker threads on Mac, which aren't expected to map big chunks of // memory). bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { - MemoryMappingLayout procmaps; + MemoryMappingLayout proc_maps(/*cache_enabled*/true); uptr start, end; - while (procmaps.Next(&start, &end, - /*offset*/0, /*filename*/0, /*filename_size*/0)) { + while (proc_maps.Next(&start, &end, + /*offset*/0, /*filename*/0, /*filename_size*/0, + /*protection*/0)) { if (!IntervalsAreSeparate(start, end, range_start, range_end)) return false; } @@ -154,13 +138,13 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { } void DumpProcessMap() { - MemoryMappingLayout proc_maps; + MemoryMappingLayout proc_maps(/*cache_enabled*/true); uptr start, end; const sptr kBufSize = 4095; char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__); Report("Process memory map follows:\n"); while (proc_maps.Next(&start, &end, /* file_offset */0, - filename, kBufSize)) { + filename, kBufSize, /* protection */0)) { Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename); } Report("End of process memory map.\n"); @@ -171,58 +155,6 @@ const char *GetPwd() { return GetEnv("PWD"); } -void DisableCoreDumper() { - struct rlimit nocore; - nocore.rlim_cur = 0; - nocore.rlim_max = 0; - setrlimit(RLIMIT_CORE, &nocore); -} - -bool StackSizeIsUnlimited() { - struct rlimit rlim; - CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim)); - return (rlim.rlim_cur == (uptr)-1); -} - -void SetStackSizeLimitInBytes(uptr limit) { - struct rlimit rlim; - rlim.rlim_cur = limit; - rlim.rlim_max = limit; - if (setrlimit(RLIMIT_STACK, &rlim)) { - Report("setrlimit() failed %d\n", errno); - Die(); - } - CHECK(!StackSizeIsUnlimited()); -} - -void SleepForSeconds(int seconds) { - sleep(seconds); -} - -void SleepForMillis(int millis) { - usleep(millis * 1000); -} - -void Exit(int exitcode) { - _exit(exitcode); -} - -void Abort() { - abort(); -} - -int Atexit(void (*function)(void)) { -#ifndef SANITIZER_GO - return atexit(function); -#else - return 0; -#endif -} - -int internal_isatty(fd_t fd) { - return isatty(fd); -} - } // namespace __sanitizer -#endif // __linux__ || __APPLE_ +#endif // SANITIZER_LINUX || SANITIZER_MAC diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc new file mode 100644 index 000000000000..43da171ba271 --- /dev/null +++ 
b/lib/sanitizer_common/sanitizer_posix_libcdep.cc @@ -0,0 +1,116 @@ +//===-- sanitizer_posix_libcdep.cc ----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries and implements libc-dependent POSIX-specific functions +// from sanitizer_libc.h. +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" + +#if SANITIZER_LINUX || SANITIZER_MAC +#include "sanitizer_common.h" +#include "sanitizer_stacktrace.h" + +#include <errno.h> +#include <pthread.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/resource.h> +#include <sys/time.h> +#include <sys/types.h> +#include <unistd.h> + +namespace __sanitizer { + +u32 GetUid() { + return getuid(); +} + +uptr GetThreadSelf() { + return (uptr)pthread_self(); +} + +void FlushUnneededShadowMemory(uptr addr, uptr size) { + madvise((void*)addr, size, MADV_DONTNEED); +} + +void DisableCoreDumper() { + struct rlimit nocore; + nocore.rlim_cur = 0; + nocore.rlim_max = 0; + setrlimit(RLIMIT_CORE, &nocore); +} + +bool StackSizeIsUnlimited() { + struct rlimit rlim; + CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim)); + return (rlim.rlim_cur == (uptr)-1); +} + +void SetStackSizeLimitInBytes(uptr limit) { + struct rlimit rlim; + rlim.rlim_cur = limit; + rlim.rlim_max = limit; + if (setrlimit(RLIMIT_STACK, &rlim)) { + Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno); + Die(); + } + CHECK(!StackSizeIsUnlimited()); +} + +void SleepForSeconds(int seconds) { + sleep(seconds); +} + +void SleepForMillis(int millis) { + usleep(millis * 1000); +} + +void Abort() { + abort(); +} + +int Atexit(void (*function)(void)) { +#ifndef SANITIZER_GO + return atexit(function); +#else + return 0; +#endif +} + +int internal_isatty(fd_t fd) { + return isatty(fd); +} + +#ifndef SANITIZER_GO +void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, + uptr stack_top, uptr stack_bottom, bool fast) { +#if !SANITIZER_CAN_FAST_UNWIND + fast = false; +#endif +#if SANITIZER_MAC + // Always unwind fast on Mac. + (void)fast; +#else + if (!fast || (stack_top == stack_bottom)) + return stack->SlowUnwindStack(pc, max_s); +#endif // SANITIZER_MAC + stack->size = 0; + stack->trace[0] = pc; + if (max_s > 1) { + stack->max_size = max_s; + stack->FastUnwindStack(pc, bp, stack_top, stack_bottom); + } +} +#endif // SANITIZER_GO + +} // namespace __sanitizer + +#endif diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc index 2393e8f2b87b..5935d7f17a5e 100644 --- a/lib/sanitizer_common/sanitizer_printf.cc +++ b/lib/sanitizer_common/sanitizer_printf.cc @@ -21,8 +21,14 @@ #include <stdio.h> #include <stdarg.h> +#if SANITIZER_WINDOWS +# define va_copy(dst, src) ((dst) = (src)) +#endif + namespace __sanitizer { +StaticSpinMutex CommonSanitizerReportMutex; + static int AppendChar(char **buff, const char *buff_end, char c) { if (*buff < buff_end) { **buff = c; @@ -173,17 +179,86 @@ void SetPrintfAndReportCallback(void (*callback)(const char *)) { PrintfAndReportCallback = callback; } -void Printf(const char *format, ...) { +#if SANITIZER_SUPPORTS_WEAK_HOOKS +// Can be overriden in frontend. 
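// OnPrint below is only declared, with weak linkage; CallPrintfAndReportCallback
// tests the symbol's address so the hook is invoked only when some other object
// file supplies a strong definition. A minimal sketch of that weak-hook pattern
// with GCC/Clang on ELF (names here are illustrative, not the sanitizer
// interface):

#include <cstdio>

// Weak declaration: the address is null unless a strong definition is linked in.
extern "C" __attribute__((weak)) void OnPrint(const char *str);

void Emit(const char *str) {
  if (&OnPrint != 0)   // hook present?
    OnPrint(str);
  std::fputs(str, stderr);
}

// A client that wants to observe every report just links a strong definition:
// extern "C" void OnPrint(const char *str) { /* forward to a log, etc. */ }

int main() {
  Emit("hello from the weak-hook sketch\n");
  return 0;
}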
+SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +void OnPrint(const char *str); +#endif + +static void CallPrintfAndReportCallback(const char *str) { +#if SANITIZER_SUPPORTS_WEAK_HOOKS + if (&OnPrint != NULL) + OnPrint(str); +#endif + if (PrintfAndReportCallback) + PrintfAndReportCallback(str); +} + +static void SharedPrintfCode(bool append_pid, const char *format, + va_list args) { + va_list args2; + va_copy(args2, args); const int kLen = 16 * 1024; - InternalScopedBuffer<char> buffer(kLen); + // |local_buffer| is small enough not to overflow the stack and/or violate + // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other + // hand, the bigger the buffer is, the more the chance the error report will + // fit into it. + char local_buffer[400]; + int needed_length; + char *buffer = local_buffer; + int buffer_size = ARRAY_SIZE(local_buffer); + // First try to print a message using a local buffer, and then fall back to + // mmaped buffer. + for (int use_mmap = 0; use_mmap < 2; use_mmap++) { + if (use_mmap) { + va_end(args); + va_copy(args, args2); + buffer = (char*)MmapOrDie(kLen, "Report"); + buffer_size = kLen; + } + needed_length = 0; + if (append_pid) { + int pid = internal_getpid(); + needed_length += internal_snprintf(buffer, buffer_size, "==%d==", pid); + if (needed_length >= buffer_size) { + // The pid doesn't fit into the current buffer. + if (!use_mmap) + continue; + RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n"); + } + } + needed_length += VSNPrintf(buffer + needed_length, + buffer_size - needed_length, format, args); + if (needed_length >= buffer_size) { + // The message doesn't fit into the current buffer. + if (!use_mmap) + continue; + RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n"); + } + // If the message fit into the buffer, print it and exit. + break; + } + RawWrite(buffer); + CallPrintfAndReportCallback(buffer); + // If we had mapped any memory, clean up. + if (buffer != local_buffer) + UnmapOrDie((void *)buffer, buffer_size); + va_end(args2); +} + +void Printf(const char *format, ...) { va_list args; va_start(args, format); - int needed_length = VSNPrintf(buffer.data(), kLen, format, args); + SharedPrintfCode(false, format, args); + va_end(args); +} + +// Like Printf, but prints the current PID before the output string. +void Report(const char *format, ...) { + va_list args; + va_start(args, format); + SharedPrintfCode(true, format, args); va_end(args); - RAW_CHECK_MSG(needed_length < kLen, "Buffer in Printf is too short!\n"); - RawWrite(buffer.data()); - if (PrintfAndReportCallback) - PrintfAndReportCallback(buffer.data()); } // Writes at most "length" symbols to "buffer" (including trailing '\0'). @@ -198,22 +273,4 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) { return needed_length; } -// Like Printf, but prints the current PID before the output string. -void Report(const char *format, ...) 
{ - const int kLen = 16 * 1024; - InternalScopedBuffer<char> buffer(kLen); - int needed_length = internal_snprintf(buffer.data(), - kLen, "==%d== ", GetPid()); - RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n"); - va_list args; - va_start(args, format); - needed_length += VSNPrintf(buffer.data() + needed_length, - kLen - needed_length, format, args); - va_end(args); - RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n"); - RawWrite(buffer.data()); - if (PrintfAndReportCallback) - PrintfAndReportCallback(buffer.data()); -} - } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_procmaps.h b/lib/sanitizer_common/sanitizer_procmaps.h index 1b8ea7aff165..b96f09ec4561 100644 --- a/lib/sanitizer_common/sanitizer_procmaps.h +++ b/lib/sanitizer_common/sanitizer_procmaps.h @@ -19,51 +19,63 @@ namespace __sanitizer { -#ifdef _WIN32 +#if SANITIZER_WINDOWS class MemoryMappingLayout { public: - MemoryMappingLayout() {} + explicit MemoryMappingLayout(bool cache_enabled) { + (void)cache_enabled; + } bool GetObjectNameAndOffset(uptr addr, uptr *offset, - char filename[], uptr filename_size) { + char filename[], uptr filename_size, + uptr *protection) { UNIMPLEMENTED(); } }; #else // _WIN32 -#if defined(__linux__) +#if SANITIZER_LINUX struct ProcSelfMapsBuff { char *data; uptr mmaped_size; uptr len; }; -#endif // defined(__linux__) +#endif // SANITIZER_LINUX class MemoryMappingLayout { public: - MemoryMappingLayout(); + explicit MemoryMappingLayout(bool cache_enabled); bool Next(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size); + char filename[], uptr filename_size, uptr *protection); void Reset(); // Gets the object file name and the offset in that object for a given // address 'addr'. Returns true on success. bool GetObjectNameAndOffset(uptr addr, uptr *offset, - char filename[], uptr filename_size); + char filename[], uptr filename_size, + uptr *protection); // In some cases, e.g. when running under a sandbox on Linux, ASan is unable // to obtain the memory mappings. It should fall back to pre-cached data // instead of aborting. static void CacheMemoryMappings(); ~MemoryMappingLayout(); + // Memory protection masks. + static const uptr kProtectionRead = 1; + static const uptr kProtectionWrite = 2; + static const uptr kProtectionExecute = 4; + static const uptr kProtectionShared = 8; + private: void LoadFromCache(); // Default implementation of GetObjectNameAndOffset. // Quite slow, because it iterates through the whole process map for each // lookup. bool IterateForObjectNameAndOffset(uptr addr, uptr *offset, - char filename[], uptr filename_size) { + char filename[], uptr filename_size, + uptr *protection) { Reset(); uptr start, end, file_offset; - for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size); + for (int i = 0; Next(&start, &end, &file_offset, filename, filename_size, + protection); i++) { if (addr >= start && addr < end) { // Don't subtract 'start' for the first entry: @@ -86,17 +98,18 @@ class MemoryMappingLayout { return false; } -# if defined __linux__ +# if SANITIZER_LINUX ProcSelfMapsBuff proc_self_maps_; char *current_; // Static mappings cache. static ProcSelfMapsBuff cached_proc_self_maps_; static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_. 
-# elif defined __APPLE__ +# elif SANITIZER_MAC template<u32 kLCSegment, typename SegmentCommand> bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset, - char filename[], uptr filename_size); + char filename[], uptr filename_size, + uptr *protection); int current_image_; u32 current_magic_; u32 current_filetype_; diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h index ec90d2d6871b..599d13645dd7 100644 --- a/lib/sanitizer_common/sanitizer_quarantine.h +++ b/lib/sanitizer_common/sanitizer_quarantine.h @@ -159,7 +159,7 @@ class QuarantineCache { atomic_store(&size_, Size() + add, memory_order_relaxed); } - QuarantineBatch *NOINLINE AllocBatch(Callback cb) { + NOINLINE QuarantineBatch* AllocBatch(Callback cb) { QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b)); b->count = 0; b->size = 0; @@ -167,6 +167,6 @@ class QuarantineCache { return b; } }; -} +} // namespace __sanitizer #endif // #ifndef SANITIZER_QUARANTINE_H diff --git a/lib/sanitizer_common/sanitizer_report_decorator.h b/lib/sanitizer_common/sanitizer_report_decorator.h index 50a3ee572fdb..49334d5e0c71 100644 --- a/lib/sanitizer_common/sanitizer_report_decorator.h +++ b/lib/sanitizer_common/sanitizer_report_decorator.h @@ -14,24 +14,26 @@ // //===----------------------------------------------------------------------===// -#ifndef SANITIZER_ALLOCATOR_H -#define SANITIZER_ALLOCATOR_H +#ifndef SANITIZER_REPORT_DECORATOR_H +#define SANITIZER_REPORT_DECORATOR_H namespace __sanitizer { class AnsiColorDecorator { public: explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { } - const char *Black() { return ansi_ ? "\033[1m\033[30m" : ""; } - const char *Red() { return ansi_ ? "\033[1m\033[31m" : ""; } - const char *Green() { return ansi_ ? "\033[1m\033[32m" : ""; } - const char *Yellow() { return ansi_ ? "\033[1m\033[33m" : ""; } - const char *Blue() { return ansi_ ? "\033[1m\033[34m" : ""; } - const char *Magenta() { return ansi_ ? "\033[1m\033[35m" : ""; } - const char *Cyan() { return ansi_ ? "\033[1m\033[36m" : ""; } - const char *White() { return ansi_ ? "\033[1m\033[37m" : ""; } - const char *Default() { return ansi_ ? "\033[1m\033[0m" : ""; } + const char *Bold() const { return ansi_ ? "\033[1m" : ""; } + const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; } + const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; } + const char *Green() const { return ansi_ ? "\033[1m\033[32m" : ""; } + const char *Yellow() const { return ansi_ ? "\033[1m\033[33m" : ""; } + const char *Blue() const { return ansi_ ? "\033[1m\033[34m" : ""; } + const char *Magenta() const { return ansi_ ? "\033[1m\033[35m" : ""; } + const char *Cyan() const { return ansi_ ? "\033[1m\033[36m" : ""; } + const char *White() const { return ansi_ ? "\033[1m\033[37m" : ""; } + const char *Default() const { return ansi_ ? 
"\033[1m\033[0m" : ""; } private: bool ansi_; }; } // namespace __sanitizer -#endif // SANITIZER_ALLOCATOR_H + +#endif // SANITIZER_REPORT_DECORATOR_H diff --git a/lib/sanitizer_common/sanitizer_stackdepot.h b/lib/sanitizer_common/sanitizer_stackdepot.h index 49e6669dd203..5915fdbb4310 100644 --- a/lib/sanitizer_common/sanitizer_stackdepot.h +++ b/lib/sanitizer_common/sanitizer_stackdepot.h @@ -13,7 +13,7 @@ #ifndef SANITIZER_STACKDEPOT_H #define SANITIZER_STACKDEPOT_H -#include "sanitizer/common_interface_defs.h" +#include "sanitizer_internal_defs.h" namespace __sanitizer { diff --git a/lib/sanitizer_common/sanitizer_stacktrace.cc b/lib/sanitizer_common/sanitizer_stacktrace.cc index 109a674e45b3..724c29c86b66 100644 --- a/lib/sanitizer_common/sanitizer_stacktrace.cc +++ b/lib/sanitizer_common/sanitizer_stacktrace.cc @@ -17,8 +17,9 @@ #include "sanitizer_symbolizer.h" namespace __sanitizer { -static const char *StripPathPrefix(const char *filepath, - const char *strip_file_prefix) { +const char *StripPathPrefix(const char *filepath, + const char *strip_file_prefix) { + if (filepath == 0) return 0; if (filepath == internal_strstr(filepath, strip_file_prefix)) return filepath + internal_strlen(strip_file_prefix); return filepath; @@ -63,7 +64,7 @@ static void PrintModuleAndOffset(const char *module, uptr offset, void StackTrace::PrintStack(const uptr *addr, uptr size, bool symbolize, const char *strip_file_prefix, SymbolizeCallback symbolize_callback ) { - MemoryMappingLayout proc_maps; + MemoryMappingLayout proc_maps(/*cache_enabled*/true); InternalScopedBuffer<char> buff(GetPageSizeCached() * 2); InternalScopedBuffer<AddressInfo> addr_frames(64); uptr frame_num = 0; @@ -84,7 +85,7 @@ void StackTrace::PrintStack(const uptr *addr, uptr size, frame_num++; } } - if (symbolize && addr_frames_num == 0) { + if (symbolize && addr_frames_num == 0 && &SymbolizeCode) { // Use our own (online) symbolizer, if necessary. addr_frames_num = SymbolizeCode(pc, addr_frames.data(), addr_frames.size()); @@ -112,7 +113,8 @@ void StackTrace::PrintStack(const uptr *addr, uptr size, PrintStackFramePrefix(frame_num, pc); uptr offset; if (proc_maps.GetObjectNameAndOffset(pc, &offset, - buff.data(), buff.size())) { + buff.data(), buff.size(), + /* protection */0)) { PrintModuleAndOffset(buff.data(), offset, strip_file_prefix); } Printf("\n"); @@ -130,10 +132,12 @@ void StackTrace::FastUnwindStack(uptr pc, uptr bp, CHECK(size == 0 && trace[0] == pc); size = 1; uhwptr *frame = (uhwptr *)bp; - uhwptr *prev_frame = frame; - while (frame >= prev_frame && + uhwptr *prev_frame = frame - 1; + // Avoid infinite loop when frame == frame[0] by using frame > prev_frame. 
+ while (frame > prev_frame && frame < (uhwptr *)stack_top - 2 && frame > (uhwptr *)stack_bottom && + IsAligned((uptr)frame, sizeof(*frame)) && size < max_size) { uhwptr pc1 = frame[1]; if (pc1 != pc) { diff --git a/lib/sanitizer_common/sanitizer_stacktrace.h b/lib/sanitizer_common/sanitizer_stacktrace.h index 597d24fd067f..fcfdd7e0b59b 100644 --- a/lib/sanitizer_common/sanitizer_stacktrace.h +++ b/lib/sanitizer_common/sanitizer_stacktrace.h @@ -19,6 +19,14 @@ namespace __sanitizer { static const uptr kStackTraceMax = 256; +#if SANITIZER_LINUX && (defined(__arm__) || \ + defined(__powerpc__) || defined(__powerpc64__) || \ + defined(__sparc__)) +#define SANITIZER_CAN_FAST_UNWIND 0 +#else +#define SANITIZER_CAN_FAST_UNWIND 1 +#endif + struct StackTrace { typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer, int out_size); @@ -57,6 +65,13 @@ struct StackTrace { u32 *compressed, uptr size); }; + +const char *StripPathPrefix(const char *filepath, + const char *strip_file_prefix); + +void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, + uptr stack_top, uptr stack_bottom, bool fast); + } // namespace __sanitizer // Use this macro if you want to print stack trace with the caller diff --git a/lib/sanitizer_common/sanitizer_stoptheworld.h b/lib/sanitizer_common/sanitizer_stoptheworld.h new file mode 100644 index 000000000000..cc9408bb845f --- /dev/null +++ b/lib/sanitizer_common/sanitizer_stoptheworld.h @@ -0,0 +1,68 @@ +//===-- sanitizer_stoptheworld.h --------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Defines the StopTheWorld function which suspends the execution of the current +// process and runs the user-supplied callback in the same address space. +// +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_STOPTHEWORLD_H +#define SANITIZER_STOPTHEWORLD_H + +#include "sanitizer_internal_defs.h" +#include "sanitizer_common.h" + +namespace __sanitizer { +typedef int SuspendedThreadID; + +// Holds the list of suspended threads and provides an interface to dump their +// register contexts. +class SuspendedThreadsList { + public: + SuspendedThreadsList() + : thread_ids_(1024) {} + SuspendedThreadID GetThreadID(uptr index) const { + CHECK_LT(index, thread_ids_.size()); + return thread_ids_[index]; + } + int GetRegistersAndSP(uptr index, uptr *buffer, uptr *sp) const; + // The buffer in GetRegistersAndSP should be at least this big. + static uptr RegisterCount(); + uptr thread_count() const { return thread_ids_.size(); } + bool Contains(SuspendedThreadID thread_id) const { + for (uptr i = 0; i < thread_ids_.size(); i++) { + if (thread_ids_[i] == thread_id) + return true; + } + return false; + } + void Append(SuspendedThreadID thread_id) { + thread_ids_.push_back(thread_id); + } + + private: + InternalVector<SuspendedThreadID> thread_ids_; + + // Prohibit copy and assign. + SuspendedThreadsList(const SuspendedThreadsList&); + void operator=(const SuspendedThreadsList&); +}; + +typedef void (*StopTheWorldCallback)( + const SuspendedThreadsList &suspended_threads_list, + void *argument); + +// Suspend all threads in the current process and run the callback on the list +// of suspended threads. This function will resume the threads before returning. 
+// The callback should not call any libc functions. +// This function should NOT be called from multiple threads simultaneously. +void StopTheWorld(StopTheWorldCallback callback, void *argument); + +} // namespace __sanitizer + +#endif // SANITIZER_STOPTHEWORLD_H diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc new file mode 100644 index 000000000000..e5284ee2211a --- /dev/null +++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc @@ -0,0 +1,403 @@ +//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// See sanitizer_stoptheworld.h for details. +// This implementation was inspired by Markus Gutschke's linuxthreads.cc. +// +//===----------------------------------------------------------------------===// + + +#include "sanitizer_platform.h" +#if SANITIZER_LINUX + +#include "sanitizer_stoptheworld.h" + +#include <errno.h> +#include <sched.h> // for clone +#include <stddef.h> +#include <sys/prctl.h> // for PR_* definitions +#include <sys/ptrace.h> // for PTRACE_* definitions +#include <sys/types.h> // for pid_t +#if SANITIZER_ANDROID && defined(__arm__) +# include <linux/user.h> // for pt_regs +#else +# include <sys/user.h> // for user_regs_struct +#endif +#include <sys/wait.h> // for signal-related stuff + +#include "sanitizer_common.h" +#include "sanitizer_libc.h" +#include "sanitizer_linux.h" +#include "sanitizer_mutex.h" +#include "sanitizer_placement_new.h" + +// This module works by spawning a Linux task which then attaches to every +// thread in the caller process with ptrace. This suspends the threads, and +// PTRACE_GETREGS can then be used to obtain their register state. The callback +// supplied to StopTheWorld() is run in the tracer task while the threads are +// suspended. +// The tracer task must be placed in a different thread group for ptrace to +// work, so it cannot be spawned as a pthread. Instead, we use the low-level +// clone() interface (we want to share the address space with the caller +// process, so we prefer clone() over fork()). +// +// We avoid the use of libc for two reasons: +// 1. calling a library function while threads are suspended could cause a +// deadlock, if one of the treads happens to be holding a libc lock; +// 2. it's generally not safe to call libc functions from the tracer task, +// because clone() does not set up a thread-local storage for it. Any +// thread-local variables used by libc will be shared between the tracer task +// and the thread which spawned it. +// +// We deal with this by replacing libc calls with calls to our own +// implementations defined in sanitizer_libc.h and sanitizer_linux.h. However, +// there are still some libc functions which are used here: +// +// * All of the system calls ultimately go through the libc syscall() function. +// We're operating under the assumption that syscall()'s implementation does +// not acquire any locks or use any thread-local data (except for the errno +// variable, which we handle separately). +// +// * We lack custom implementations of sigfillset() and sigaction(), so we use +// the libc versions instead. The same assumptions as above apply. 
+// +// * It is safe to call libc functions before the cloned thread is spawned or +// after it has exited. The following functions are used in this manner: +// sigdelset() +// sigprocmask() +// clone() + +COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t)); + +namespace __sanitizer { +// This class handles thread suspending/unsuspending in the tracer thread. +class ThreadSuspender { + public: + explicit ThreadSuspender(pid_t pid) + : pid_(pid) { + CHECK_GE(pid, 0); + } + bool SuspendAllThreads(); + void ResumeAllThreads(); + void KillAllThreads(); + SuspendedThreadsList &suspended_threads_list() { + return suspended_threads_list_; + } + private: + SuspendedThreadsList suspended_threads_list_; + pid_t pid_; + bool SuspendThread(SuspendedThreadID thread_id); +}; + +bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) { + // Are we already attached to this thread? + // Currently this check takes linear time, however the number of threads is + // usually small. + if (suspended_threads_list_.Contains(thread_id)) + return false; + int pterrno; + if (internal_iserror(internal_ptrace(PTRACE_ATTACH, thread_id, NULL, NULL), + &pterrno)) { + // Either the thread is dead, or something prevented us from attaching. + // Log this event and move on. + Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno); + return false; + } else { + if (SanitizerVerbosity > 0) + Report("Attached to thread %d.\n", thread_id); + // The thread is not guaranteed to stop before ptrace returns, so we must + // wait on it. + uptr waitpid_status; + HANDLE_EINTR(waitpid_status, internal_waitpid(thread_id, NULL, __WALL)); + int wperrno; + if (internal_iserror(waitpid_status, &wperrno)) { + // Got a ECHILD error. I don't think this situation is possible, but it + // doesn't hurt to report it. + Report("Waiting on thread %d failed, detaching (errno %d).\n", thread_id, + wperrno); + internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL); + return false; + } + suspended_threads_list_.Append(thread_id); + return true; + } +} + +void ThreadSuspender::ResumeAllThreads() { + for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) { + pid_t tid = suspended_threads_list_.GetThreadID(i); + int pterrno; + if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL), + &pterrno)) { + if (SanitizerVerbosity > 0) + Report("Detached from thread %d.\n", tid); + } else { + // Either the thread is dead, or we are already detached. + // The latter case is possible, for instance, if this function was called + // from a signal handler. + Report("Could not detach from thread %d (errno %d).\n", tid, pterrno); + } + } +} + +void ThreadSuspender::KillAllThreads() { + for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) + internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i), + NULL, NULL); +} + +bool ThreadSuspender::SuspendAllThreads() { + ThreadLister thread_lister(pid_); + bool added_threads; + do { + // Run through the directory entries once. + added_threads = false; + pid_t tid = thread_lister.GetNextTID(); + while (tid >= 0) { + if (SuspendThread(tid)) + added_threads = true; + tid = thread_lister.GetNextTID(); + } + if (thread_lister.error()) { + // Detach threads and fail. + ResumeAllThreads(); + return false; + } + thread_lister.Reset(); + } while (added_threads); + return true; +} + +// Pointer to the ThreadSuspender instance for use in signal handler. 
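// SuspendThread() above uses the classic attach sequence: PTRACE_ATTACH stops
// the target asynchronously, so the tracer must wait for the stop notification
// before touching the thread, and a later PTRACE_DETACH lets it run again.
// A simplified standalone sketch using the plain libc wrappers rather than the
// internal_* syscall layer (illustrative only; Linux-specific):

#include <sys/ptrace.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <cerrno>
#include <cstdio>

bool SuspendTid(pid_t tid) {
  if (ptrace(PTRACE_ATTACH, tid, nullptr, nullptr) != 0) {
    std::fprintf(stderr, "attach to %d failed (errno %d)\n", (int)tid, errno);
    return false;  // thread exited, or attaching is not permitted
  }
  int status = 0;
  // The stop is reported asynchronously; __WALL also covers clone()-created threads.
  while (waitpid(tid, &status, __WALL) < 0 && errno == EINTR) {}
  return true;
}

void ResumeTid(pid_t tid) {
  ptrace(PTRACE_DETACH, tid, nullptr, nullptr);  // detaching resumes the thread
}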
+static ThreadSuspender *thread_suspender_instance = NULL; + +// Signals that should not be blocked (this is used in the parent thread as well +// as the tracer thread). +static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, + SIGBUS, SIGXCPU, SIGXFSZ }; + +// Structure for passing arguments into the tracer thread. +struct TracerThreadArgument { + StopTheWorldCallback callback; + void *callback_argument; + // The tracer thread waits on this mutex while the parent finished its + // preparations. + BlockingMutex mutex; +}; + +// Signal handler to wake up suspended threads when the tracer thread dies. +void TracerThreadSignalHandler(int signum, siginfo_t *siginfo, void *) { + if (thread_suspender_instance != NULL) { + if (signum == SIGABRT) + thread_suspender_instance->KillAllThreads(); + else + thread_suspender_instance->ResumeAllThreads(); + } + internal__exit((signum == SIGABRT) ? 1 : 2); +} + +// Size of alternative stack for signal handlers in the tracer thread. +static const int kHandlerStackSize = 4096; + +// This function will be run as a cloned task. +static int TracerThread(void* argument) { + TracerThreadArgument *tracer_thread_argument = + (TracerThreadArgument *)argument; + + // Wait for the parent thread to finish preparations. + tracer_thread_argument->mutex.Lock(); + tracer_thread_argument->mutex.Unlock(); + + ThreadSuspender thread_suspender(internal_getppid()); + // Global pointer for the signal handler. + thread_suspender_instance = &thread_suspender; + + // Alternate stack for signal handling. + InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize); + struct sigaltstack handler_stack; + internal_memset(&handler_stack, 0, sizeof(handler_stack)); + handler_stack.ss_sp = handler_stack_memory.data(); + handler_stack.ss_size = kHandlerStackSize; + internal_sigaltstack(&handler_stack, NULL); + + // Install our handler for fatal signals. Other signals should be blocked by + // the mask we inherited from the caller thread. + for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals); + signal_index++) { + struct sigaction new_sigaction; + internal_memset(&new_sigaction, 0, sizeof(new_sigaction)); + new_sigaction.sa_sigaction = TracerThreadSignalHandler; + new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO; + sigfillset(&new_sigaction.sa_mask); + sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL); + } + + int exit_code = 0; + if (!thread_suspender.SuspendAllThreads()) { + Report("Failed suspending threads.\n"); + exit_code = 3; + } else { + tracer_thread_argument->callback(thread_suspender.suspended_threads_list(), + tracer_thread_argument->callback_argument); + thread_suspender.ResumeAllThreads(); + exit_code = 0; + } + thread_suspender_instance = NULL; + handler_stack.ss_flags = SS_DISABLE; + internal_sigaltstack(&handler_stack, NULL); + return exit_code; +} + +class ScopedStackSpaceWithGuard { + public: + explicit ScopedStackSpaceWithGuard(uptr stack_size) { + stack_size_ = stack_size; + guard_size_ = GetPageSizeCached(); + // FIXME: Omitting MAP_STACK here works in current kernels but might break + // in the future. 
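// This constructor reserves the tracer stack plus one extra page and makes
// that page inaccessible, so a stack overflow in the cloned task faults
// immediately instead of silently corrupting adjacent memory. A standalone
// sketch of the same guard-page idea with raw mmap/mprotect (illustrative;
// the real code goes through MmapOrDie and Mprotect):

#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>

// Returns the mapping base (pass it back to munmap); *stack_top receives the
// address to hand to clone(), since the stack grows downwards.
void *AllocStackWithGuard(size_t stack_size, void **stack_top) {
  size_t guard = (size_t)sysconf(_SC_PAGESIZE);
  char *base = (char *)mmap(nullptr, stack_size + guard, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    return nullptr;
  mprotect(base, guard, PROT_NONE);          // guard page at the low end
  *stack_top = base + guard + stack_size;    // highest usable stack address
  return base;
}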
+ guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_, + "ScopedStackWithGuard"); + CHECK_EQ(guard_start_, (uptr)Mprotect((uptr)guard_start_, guard_size_)); + } + ~ScopedStackSpaceWithGuard() { + UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_); + } + void *Bottom() const { + return (void *)(guard_start_ + stack_size_ + guard_size_); + } + + private: + uptr stack_size_; + uptr guard_size_; + uptr guard_start_; +}; + +static sigset_t blocked_sigset; +static sigset_t old_sigset; +static struct sigaction old_sigactions[ARRAY_SIZE(kUnblockedSignals)]; + +void StopTheWorld(StopTheWorldCallback callback, void *argument) { + // Block all signals that can be blocked safely, and install default handlers + // for the remaining signals. + // We cannot allow user-defined handlers to run while the ThreadSuspender + // thread is active, because they could conceivably call some libc functions + // which modify errno (which is shared between the two threads). + sigfillset(&blocked_sigset); + for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals); + signal_index++) { + // Remove the signal from the set of blocked signals. + sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]); + // Install the default handler. + struct sigaction new_sigaction; + internal_memset(&new_sigaction, 0, sizeof(new_sigaction)); + new_sigaction.sa_handler = SIG_DFL; + sigfillset(&new_sigaction.sa_mask); + sigaction(kUnblockedSignals[signal_index], &new_sigaction, + &old_sigactions[signal_index]); + } + int sigprocmask_status = sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset); + CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail + // Make this process dumpable. Processes that are not dumpable cannot be + // attached to. + int process_was_dumpable = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0); + if (!process_was_dumpable) + internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); + // Prepare the arguments for TracerThread. + struct TracerThreadArgument tracer_thread_argument; + tracer_thread_argument.callback = callback; + tracer_thread_argument.callback_argument = argument; + const uptr kTracerStackSize = 2 * 1024 * 1024; + ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize); + // Block the execution of TracerThread until after we have set ptrace + // permissions. + tracer_thread_argument.mutex.Lock(); + pid_t tracer_pid = clone(TracerThread, tracer_stack.Bottom(), + CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED, + &tracer_thread_argument, 0, 0, 0); + if (tracer_pid < 0) { + Report("Failed spawning a tracer thread (errno %d).\n", errno); + tracer_thread_argument.mutex.Unlock(); + } else { + // On some systems we have to explicitly declare that we want to be traced + // by the tracer thread. +#ifdef PR_SET_PTRACER + internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0); +#endif + // Allow the tracer thread to start. + tracer_thread_argument.mutex.Unlock(); + // Since errno is shared between this thread and the tracer thread, we + // must avoid using errno while the tracer thread is running. + // At this point, any signal will either be blocked or kill us, so waitpid + // should never return (and set errno) while the tracer thread is alive. + uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL); + int wperrno; + if (internal_iserror(waitpid_status, &wperrno)) + Report("Waiting on the tracer thread failed (errno %d).\n", wperrno); + } + // Restore the dumpable flag. 
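// StopTheWorld() spawns the tracer with clone() rather than pthread_create so
// that it lives in a separate thread group (a requirement for ptrace-attaching
// to the caller's threads) while still sharing the address space via CLONE_VM.
// A minimal standalone clone() sketch (illustrative; the real code also passes
// CLONE_UNTRACED, uses a guarded stack, and avoids libc in the child):

#include <sched.h>      // clone (GNU extension; g++ defines _GNU_SOURCE)
#include <sys/wait.h>
#include <unistd.h>
#include <cstdlib>
#include <cstring>

static int Helper(void *arg) {
  const char *msg = (const char *)arg;
  write(STDOUT_FILENO, msg, strlen(msg));   // async-signal-safe output
  return 0;
}

int main() {
  const size_t kStackSize = 1 << 20;
  char *stack = (char *)malloc(kStackSize);
  if (!stack) return 1;
  // clone() takes the *top* of the child's stack; SIGCHLD in the flags lets a
  // plain waitpid() reap the child. CLONE_VM shares the address space.
  pid_t pid = clone(Helper, stack + kStackSize,
                    CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD,
                    (void *)"helper task done\n");
  if (pid < 0) return 1;
  waitpid(pid, nullptr, 0);
  free(stack);
  return 0;
}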
+ if (!process_was_dumpable) + internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0); + // Restore the signal handlers. + for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals); + signal_index++) { + sigaction(kUnblockedSignals[signal_index], + &old_sigactions[signal_index], NULL); + } + sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset); +} + +// Platform-specific methods from SuspendedThreadsList. +#if SANITIZER_ANDROID && defined(__arm__) +typedef pt_regs regs_struct; +#define REG_SP ARM_sp + +#elif SANITIZER_LINUX && defined(__arm__) +typedef user_regs regs_struct; +#define REG_SP uregs[13] + +#elif defined(__i386__) || defined(__x86_64__) +typedef user_regs_struct regs_struct; +#if defined(__i386__) +#define REG_SP esp +#else +#define REG_SP rsp +#endif + +#elif defined(__powerpc__) || defined(__powerpc64__) +typedef pt_regs regs_struct; +#define REG_SP gpr[PT_R1] + +#else +#error "Unsupported architecture" +#endif // SANITIZER_ANDROID && defined(__arm__) + +int SuspendedThreadsList::GetRegistersAndSP(uptr index, + uptr *buffer, + uptr *sp) const { + pid_t tid = GetThreadID(index); + regs_struct regs; + int pterrno; + if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, ®s), + &pterrno)) { + Report("Could not get registers from thread %d (errno %d).\n", + tid, pterrno); + return -1; + } + + *sp = regs.REG_SP; + internal_memcpy(buffer, ®s, sizeof(regs)); + return 0; +} + +uptr SuspendedThreadsList::RegisterCount() { + return sizeof(regs_struct) / sizeof(uptr); +} +} // namespace __sanitizer + +#endif // SANITIZER_LINUX diff --git a/lib/sanitizer_common/sanitizer_symbolizer.h b/lib/sanitizer_common/sanitizer_symbolizer.h index c26d621ea065..ef37fd387f98 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer.h +++ b/lib/sanitizer_common/sanitizer_symbolizer.h @@ -66,9 +66,13 @@ struct DataInfo { // for a given address (in all inlined functions). Returns the number // of descriptions actually filled. // This function should NOT be called from two threads simultaneously. -uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames); +uptr SymbolizeCode(uptr address, AddressInfo *frames, uptr max_frames) + SANITIZER_WEAK_ATTRIBUTE; bool SymbolizeData(uptr address, DataInfo *info); +bool IsSymbolizerAvailable(); +void FlushSymbolizer(); // releases internal caches (if any) + // Attempts to demangle the provided C++ mangled name. const char *Demangle(const char *Name); @@ -104,8 +108,13 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer, // OS-dependent function that fills array with descriptions of at most // "max_modules" currently loaded modules. Returns the number of -// initialized modules. -uptr GetListOfModules(LoadedModule *modules, uptr max_modules); +// initialized modules. If filter is nonzero, ignores modules for which +// filter(full_name) is false. +typedef bool (*string_predicate_t)(const char *); +uptr GetListOfModules(LoadedModule *modules, uptr max_modules, + string_predicate_t filter); + +void SymbolizerPrepareForSandboxing(); } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc b/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc index 438629492923..e20fb91f0eb5 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_itanium.cc @@ -10,7 +10,9 @@ // This file is shared between the sanitizer run-time libraries. // Itanium C++ ABI-specific implementation of symbolizer parts. 
//===----------------------------------------------------------------------===// -#if defined(__APPLE__) || defined(__linux__) + +#include "sanitizer_platform.h" +#if SANITIZER_MAC || SANITIZER_LINUX #include "sanitizer_symbolizer.h" @@ -39,4 +41,4 @@ const char *__sanitizer::Demangle(const char *MangledName) { return MangledName; } -#endif // __APPLE__ || __linux__ +#endif // SANITIZER_MAC || SANITIZER_LINUX diff --git a/lib/sanitizer_common/sanitizer_symbolizer.cc b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc index a1d95ae0e0b2..ad339e21a927 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc @@ -1,4 +1,4 @@ -//===-- sanitizer_symbolizer.cc -------------------------------------------===// +//===-- sanitizer_symbolizer_libcdep.cc -----------------------------------===// // // The LLVM Compiler Infrastructure // @@ -111,7 +111,7 @@ class ExternalSymbolizer { char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { CHECK(module_name); - internal_snprintf(buffer_, kBufferSize, "%s%s 0x%zx\n", + internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n", is_data ? "DATA " : "", module_name, module_offset); if (!writeToSymbolizer(buffer_, internal_strlen(buffer_))) return 0; @@ -128,6 +128,9 @@ class ExternalSymbolizer { return StartSymbolizerSubprocess(path_, &input_fd_, &output_fd_); } + void Flush() { + } + private: bool readFromSymbolizer(char *buffer, uptr max_length) { if (max_length == 0) @@ -176,7 +179,67 @@ class ExternalSymbolizer { static LowLevelAllocator symbolizer_allocator; // Linker initialized. +#if SANITIZER_SUPPORTS_WEAK_HOOKS +extern "C" { +SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset, + char *Buffer, int MaxLength); +SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset, + char *Buffer, int MaxLength); +SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_symbolize_flush(); +} // extern "C" + +class InternalSymbolizer { + public: + typedef bool (*SanitizerSymbolizeFn)(const char*, u64, char*, int); + + static InternalSymbolizer *get() { + if (__sanitizer_symbolize_code != 0 && + __sanitizer_symbolize_data != 0) { + void *mem = symbolizer_allocator.Allocate(sizeof(InternalSymbolizer)); + return new(mem) InternalSymbolizer(); + } + return 0; + } + + char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { + SanitizerSymbolizeFn symbolize_fn = is_data ? __sanitizer_symbolize_data + : __sanitizer_symbolize_code; + if (symbolize_fn(module_name, module_offset, buffer_, kBufferSize)) + return buffer_; + return 0; + } + + void Flush() { + if (__sanitizer_symbolize_flush) + __sanitizer_symbolize_flush(); + } + + private: + InternalSymbolizer() { } + + static const int kBufferSize = 16 * 1024; + char buffer_[kBufferSize]; +}; +#else // SANITIZER_SUPPORTS_WEAK_HOOKS + +class InternalSymbolizer { + public: + static InternalSymbolizer *get() { return 0; } + char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { + return 0; + } + void Flush() { + } +}; + +#endif // SANITIZER_SUPPORTS_WEAK_HOOKS + class Symbolizer { + // This class has no constructor, as global constructors are forbidden in + // sanitizer_common. It should be linker initialized instead. 
public: uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames) { if (max_frames == 0) @@ -268,8 +331,30 @@ class Symbolizer { return true; } + bool IsSymbolizerAvailable() { + if (internal_symbolizer_ == 0) + internal_symbolizer_ = InternalSymbolizer::get(); + return internal_symbolizer_ || external_symbolizer_; + } + + void Flush() { + if (internal_symbolizer_) + internal_symbolizer_->Flush(); + if (external_symbolizer_) + external_symbolizer_->Flush(); + } + private: char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { + // First, try to use internal symbolizer. + if (!IsSymbolizerAvailable()) { + return 0; + } + if (internal_symbolizer_) { + return internal_symbolizer_->SendCommand(is_data, module_name, + module_offset); + } + // Otherwise, fall back to external symbolizer. if (external_symbolizer_ == 0) { ReportExternalSymbolizerError( "WARNING: Trying to symbolize code, but external " @@ -293,21 +378,35 @@ class Symbolizer { } LoadedModule *FindModuleForAddress(uptr address) { - if (modules_ == 0) { + bool modules_were_reloaded = false; + if (modules_ == 0 || !modules_fresh_) { modules_ = (LoadedModule*)(symbolizer_allocator.Allocate( kMaxNumberOfModuleContexts * sizeof(LoadedModule))); CHECK(modules_); - n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts); - CHECK_GT(n_modules_, 0); + n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts, + /* filter */ 0); + // FIXME: Return this check when GetListOfModules is implemented on Mac. + // CHECK_GT(n_modules_, 0); CHECK_LT(n_modules_, kMaxNumberOfModuleContexts); + modules_fresh_ = true; + modules_were_reloaded = true; } for (uptr i = 0; i < n_modules_; i++) { if (modules_[i].containsAddress(address)) { return &modules_[i]; } } + // Reload the modules and look up again, if we haven't tried it yet. + if (!modules_were_reloaded) { + // FIXME: set modules_fresh_ from dlopen()/dlclose() interceptors. + // It's too aggressive to reload the list of modules each time we fail + // to find a module for a given address. + modules_fresh_ = false; + return FindModuleForAddress(address); + } return 0; } + void ReportExternalSymbolizerError(const char *msg) { // Don't use atomics here for now, as SymbolizeCode can't be called // from multiple threads anyway. @@ -322,8 +421,11 @@ class Symbolizer { static const uptr kMaxNumberOfModuleContexts = 1 << 14; LoadedModule *modules_; // Array of module descriptions is leaked. uptr n_modules_; + // If stale, need to reload the modules before looking up addresses. + bool modules_fresh_; ExternalSymbolizer *external_symbolizer_; // Leaked. + InternalSymbolizer *internal_symbolizer_; // Leaked. }; static Symbolizer symbolizer; // Linker initialized. 
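[Editor's aside, not part of the commit: a minimal caller-side sketch of the availability/flush interface introduced above. The function and type names (IsSymbolizerAvailable, SymbolizeCode, FlushSymbolizer, SymbolizerPrepareForSandboxing, AddressInfo, Printf, ARRAY_SIZE) come from the sanitizer_common headers touched in this diff; the buffer size, output format and the sandbox-entry helper are illustrative assumptions.]

// Hypothetical usage sketch only -- not part of this change.
#include "sanitizer_common.h"
#include "sanitizer_symbolizer.h"

namespace __sanitizer {

void DescribePCSketch(uptr pc) {
  if (!IsSymbolizerAvailable()) {
    // Neither the weak __sanitizer_symbolize_* hooks nor an external
    // symbolizer binary are present; print a bare address instead.
    Printf("  pc 0x%zx (<symbolizer unavailable>)\n", pc);
    return;
  }
  AddressInfo frames[16];
  uptr n_frames = SymbolizeCode(pc, frames, ARRAY_SIZE(frames));
  Printf("  pc 0x%zx resolved to %zu inlined frame(s)\n", pc, n_frames);
}

void PrepareForSandboxSketch() {
  // Cache /proc/self/exe while it is still readable, then drop any
  // symbolizer caches before the process gives up filesystem access.
  SymbolizerPrepareForSandboxing();
  FlushSymbolizer();
}

}  // namespace __sanitizer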
@@ -340,4 +442,12 @@ bool InitializeExternalSymbolizer(const char *path_to_symbolizer) { return symbolizer.InitializeExternalSymbolizer(path_to_symbolizer); } +bool IsSymbolizerAvailable() { + return symbolizer.IsSymbolizerAvailable(); +} + +void FlushSymbolizer() { + symbolizer.Flush(); +} + } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_symbolizer_linux.cc b/lib/sanitizer_common/sanitizer_symbolizer_linux_libcdep.cc index 4bd3dc8826ef..82ce50e0aacb 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_linux.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_linux_libcdep.cc @@ -1,4 +1,4 @@ -//===-- sanitizer_symbolizer_linux.cc -------------------------------------===// +//===-- sanitizer_symbolizer_linux_libcdep.cc -----------------------------===// // // The LLVM Compiler Infrastructure // @@ -11,13 +11,18 @@ // run-time libraries. // Linux-specific implementation of symbolizer parts. //===----------------------------------------------------------------------===// -#ifdef __linux__ + +#include "sanitizer_platform.h" +#if SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" #include "sanitizer_placement_new.h" #include "sanitizer_symbolizer.h" +// Android NDK r8e elf.h depends on stdint.h without including the latter. +#include <stdint.h> + #include <elf.h> #include <errno.h> #include <poll.h> @@ -26,7 +31,7 @@ #include <sys/wait.h> #include <unistd.h> -#if !defined(__ANDROID__) && !defined(ANDROID) +#if !SANITIZER_ANDROID #include <link.h> #endif @@ -99,7 +104,7 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer, for (int fd = getdtablesize(); fd > 2; fd--) internal_close(fd); execl(path_to_symbolizer, path_to_symbolizer, (char*)0); - Exit(1); + internal__exit(1); } // Continue execution in parent process. @@ -121,39 +126,71 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer, return true; } -#if defined(__ANDROID__) || defined(ANDROID) -uptr GetListOfModules(LoadedModule *modules, uptr max_modules) { - UNIMPLEMENTED(); +#if SANITIZER_ANDROID +uptr GetListOfModules(LoadedModule *modules, uptr max_modules, + string_predicate_t filter) { + return 0; } -#else // ANDROID + +void SymbolizerPrepareForSandboxing() { + // Do nothing on Android. +} +#else // SANITIZER_ANDROID typedef ElfW(Phdr) Elf_Phdr; struct DlIteratePhdrData { LoadedModule *modules; uptr current_n; + bool first; uptr max_n; + string_predicate_t filter; }; static const uptr kMaxPathLength = 512; +static char proc_self_exe_cache_str[kMaxPathLength]; +static uptr proc_self_exe_cache_len = 0; + +static uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { + uptr module_name_len = internal_readlink( + "/proc/self/exe", buf, buf_len); + int readlink_error; + if (internal_iserror(buf_len, &readlink_error)) { + if (proc_self_exe_cache_len) { + // If available, use the cached module name. + CHECK_LE(proc_self_exe_cache_len, buf_len); + internal_strncpy(buf, proc_self_exe_cache_str, buf_len); + module_name_len = internal_strlen(proc_self_exe_cache_str); + } else { + // We can't read /proc/self/exe for some reason, assume the name of the + // binary is unknown. 
+ Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, " + "some stack frames may not be symbolized\n", readlink_error); + module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe"); + } + CHECK_LT(module_name_len, buf_len); + buf[module_name_len] = '\0'; + } + return module_name_len; +} + static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { DlIteratePhdrData *data = (DlIteratePhdrData*)arg; if (data->current_n == data->max_n) return 0; InternalScopedBuffer<char> module_name(kMaxPathLength); module_name.data()[0] = '\0'; - if (data->current_n == 0) { + if (data->first) { + data->first = false; // First module is the binary itself. - uptr module_name_len = internal_readlink( - "/proc/self/exe", module_name.data(), module_name.size()); - CHECK_NE(module_name_len, (uptr)-1); - CHECK_LT(module_name_len, module_name.size()); - module_name[module_name_len] = '\0'; + ReadBinaryName(module_name.data(), module_name.size()); } else if (info->dlpi_name) { internal_strncpy(module_name.data(), info->dlpi_name, module_name.size()); } if (module_name.data()[0] == '\0') return 0; + if (data->filter && !data->filter(module_name.data())) + return 0; void *mem = &data->modules[data->current_n]; LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(), info->dlpi_addr); @@ -169,14 +206,22 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { return 0; } -uptr GetListOfModules(LoadedModule *modules, uptr max_modules) { +uptr GetListOfModules(LoadedModule *modules, uptr max_modules, + string_predicate_t filter) { CHECK(modules); - DlIteratePhdrData data = {modules, 0, max_modules}; + DlIteratePhdrData data = {modules, 0, true, max_modules, filter}; dl_iterate_phdr(dl_iterate_phdr_cb, &data); return data.current_n; } -#endif // ANDROID + +void SymbolizerPrepareForSandboxing() { + if (!proc_self_exe_cache_len) { + proc_self_exe_cache_len = + ReadBinaryName(proc_self_exe_cache_str, kMaxPathLength); + } +} +#endif // SANITIZER_ANDROID } // namespace __sanitizer -#endif // __linux__ +#endif // SANITIZER_LINUX diff --git a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc index 23993607e77b..9d96690bfda2 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_mac.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_mac.cc @@ -11,7 +11,9 @@ // run-time libraries. // Mac-specific implementation of symbolizer parts. //===----------------------------------------------------------------------===// -#ifdef __APPLE__ + +#include "sanitizer_platform.h" +#if SANITIZER_MAC #include "sanitizer_internal_defs.h" #include "sanitizer_symbolizer.h" @@ -22,10 +24,17 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer, UNIMPLEMENTED(); } -uptr GetListOfModules(LoadedModule *modules, uptr max_modules) { - UNIMPLEMENTED(); +uptr GetListOfModules(LoadedModule *modules, uptr max_modules, + string_predicate_t filter) { + // FIXME: Actually implement this on Mac. Just using MemoryMappingLayout + // may be enough for this on Mac. + return 0; +} + +void SymbolizerPrepareForSandboxing() { + // Do nothing on Mac. } } // namespace __sanitizer -#endif // __APPLE__ +#endif // SANITIZER_MAC diff --git a/lib/sanitizer_common/sanitizer_symbolizer_win.cc b/lib/sanitizer_common/sanitizer_symbolizer_win.cc index f1b6a02a6f9a..993261aab7b0 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_win.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_win.cc @@ -11,7 +11,9 @@ // run-time libraries. 
// Windows-specific implementation of symbolizer parts. //===----------------------------------------------------------------------===// -#ifdef _WIN32 + +#include "sanitizer_platform.h" +#if SANITIZER_WINDOWS #include <windows.h> #include "sanitizer_internal_defs.h" @@ -24,10 +26,15 @@ bool StartSymbolizerSubprocess(const char *path_to_symbolizer, UNIMPLEMENTED(); } -uptr GetListOfModules(LoadedModule *modules, uptr max_modules) { +uptr GetListOfModules(LoadedModule *modules, uptr max_modules, + string_predicate_t filter) { UNIMPLEMENTED(); }; +void SymbolizerPrepareForSandboxing() { + // Do nothing on Windows. +} + const char *Demangle(const char *MangledName) { return MangledName; } diff --git a/lib/sanitizer_common/sanitizer_syscall_generic.inc b/lib/sanitizer_common/sanitizer_syscall_generic.inc new file mode 100644 index 000000000000..aac20a5f2d69 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_syscall_generic.inc @@ -0,0 +1,24 @@ +//===-- sanitizer_syscall_generic.inc ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Generic implementations of internal_syscall and internal_iserror. +// +//===----------------------------------------------------------------------===// + +#define internal_syscall syscall + +bool internal_iserror(uptr retval, int *rverrno) { + if (retval == (uptr)-1) { + if (rverrno) + *rverrno = errno; + return true; + } else { + return false; + } +} diff --git a/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc b/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc new file mode 100644 index 000000000000..e084b84ab118 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc @@ -0,0 +1,87 @@ +//===-- sanitizer_syscall_linux_x86_64.inc ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Implementations of internal_syscall and internal_iserror for Linux/x86_64. 
+// +//===----------------------------------------------------------------------===// + +static uptr internal_syscall(u64 nr) { + u64 retval; + asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11"); + return retval; +} + +template <typename T1> +static uptr internal_syscall(u64 nr, T1 arg1) { + u64 retval; + asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1) : + "rcx", "r11"); + return retval; +} + +template <typename T1, typename T2> +static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2) { + u64 retval; + asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1), + "S"((u64)arg2) : "rcx", "r11"); + return retval; +} + +template <typename T1, typename T2, typename T3> +static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3) { + u64 retval; + asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1), + "S"((u64)arg2), "d"((u64)arg3) : "rcx", "r11"); + return retval; +} + +template <typename T1, typename T2, typename T3, typename T4> +static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { + u64 retval; + asm volatile("mov %5, %%r10;" + "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1), + "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4) : + "rcx", "r11", "r10"); + return retval; +} + +template <typename T1, typename T2, typename T3, typename T4, typename T5> +static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4, + T5 arg5) { + u64 retval; + asm volatile("mov %5, %%r10;" + "mov %6, %%r8;" + "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1), + "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5) : + "rcx", "r11", "r10", "r8"); + return retval; +} + +template <typename T1, typename T2, typename T3, typename T4, typename T5, + typename T6> +static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4, + T5 arg5, T6 arg6) { + u64 retval; + asm volatile("mov %5, %%r10;" + "mov %6, %%r8;" + "mov %7, %%r9;" + "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1), + "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5), + "r"((u64)arg6) : "rcx", "r11", "r10", "r8", "r9"); + return retval; +} + +bool internal_iserror(uptr retval, int *rverrno) { + if (retval >= (uptr)-4095) { + if (rverrno) + *rverrno = -retval; + return true; + } + return false; +} diff --git a/lib/sanitizer_common/sanitizer_thread_registry.cc b/lib/sanitizer_common/sanitizer_thread_registry.cc new file mode 100644 index 000000000000..466dc3b8a27f --- /dev/null +++ b/lib/sanitizer_common/sanitizer_thread_registry.cc @@ -0,0 +1,279 @@ +//===-- sanitizer_thread_registry.cc --------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between sanitizer tools. +// +// General thread bookkeeping functionality. +//===----------------------------------------------------------------------===// + +#include "sanitizer_thread_registry.h" + +namespace __sanitizer { + +ThreadContextBase::ThreadContextBase(u32 tid) + : tid(tid), unique_id(0), os_id(0), user_id(0), status(ThreadStatusInvalid), + detached(false), reuse_count(0), parent_tid(0), next(0) { + name[0] = '\0'; +} + +ThreadContextBase::~ThreadContextBase() { + // ThreadContextBase should never be deleted. 
+ CHECK(0); +} + +void ThreadContextBase::SetName(const char *new_name) { + name[0] = '\0'; + if (new_name) { + internal_strncpy(name, new_name, sizeof(name)); + name[sizeof(name) - 1] = '\0'; + } +} + +void ThreadContextBase::SetDead() { + CHECK(status == ThreadStatusRunning || + status == ThreadStatusFinished); + status = ThreadStatusDead; + user_id = 0; + OnDead(); +} + +void ThreadContextBase::SetJoined(void *arg) { + // FIXME(dvyukov): print message and continue (it's user error). + CHECK_EQ(false, detached); + CHECK_EQ(ThreadStatusFinished, status); + status = ThreadStatusDead; + user_id = 0; + OnJoined(arg); +} + +void ThreadContextBase::SetFinished() { + if (!detached) + status = ThreadStatusFinished; + OnFinished(); +} + +void ThreadContextBase::SetStarted(uptr _os_id, void *arg) { + status = ThreadStatusRunning; + os_id = _os_id; + OnStarted(arg); +} + +void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id, + bool _detached, u32 _parent_tid, void *arg) { + status = ThreadStatusCreated; + user_id = _user_id; + unique_id = _unique_id; + detached = _detached; + // Parent tid makes no sense for the main thread. + if (tid != 0) + parent_tid = _parent_tid; + OnCreated(arg); +} + +void ThreadContextBase::Reset() { + status = ThreadStatusInvalid; + reuse_count++; + SetName(0); + OnReset(); +} + +// ThreadRegistry implementation. + +const u32 ThreadRegistry::kUnknownTid = -1U; + +ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads, + u32 thread_quarantine_size) + : context_factory_(factory), + max_threads_(max_threads), + thread_quarantine_size_(thread_quarantine_size), + mtx_(), + n_contexts_(0), + total_threads_(0), + alive_threads_(0), + max_alive_threads_(0), + running_threads_(0) { + threads_ = (ThreadContextBase **)MmapOrDie(max_threads_ * sizeof(threads_[0]), + "ThreadRegistry"); + dead_threads_.clear(); + invalid_threads_.clear(); +} + +void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running, + uptr *alive) { + BlockingMutexLock l(&mtx_); + if (total) *total = n_contexts_; + if (running) *running = running_threads_; + if (alive) *alive = alive_threads_; +} + +uptr ThreadRegistry::GetMaxAliveThreads() { + BlockingMutexLock l(&mtx_); + return max_alive_threads_; +} + +u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid, + void *arg) { + BlockingMutexLock l(&mtx_); + u32 tid = kUnknownTid; + ThreadContextBase *tctx = QuarantinePop(); + if (tctx) { + tid = tctx->tid; + } else if (n_contexts_ < max_threads_) { + // Allocate new thread context and tid. + tid = n_contexts_++; + tctx = context_factory_(tid); + threads_[tid] = tctx; + } else { + Report("%s: Thread limit (%u threads) exceeded. 
Dying.\n", + SanitizerToolName, max_threads_); + Die(); + } + CHECK_NE(tctx, 0); + CHECK_NE(tid, kUnknownTid); + CHECK_LT(tid, max_threads_); + CHECK_EQ(tctx->status, ThreadStatusInvalid); + alive_threads_++; + if (max_alive_threads_ < alive_threads_) { + max_alive_threads_++; + CHECK_EQ(alive_threads_, max_alive_threads_); + } + tctx->SetCreated(user_id, total_threads_++, detached, + parent_tid, arg); + return tid; +} + +void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb, + void *arg) { + CheckLocked(); + for (u32 tid = 0; tid < n_contexts_; tid++) { + ThreadContextBase *tctx = threads_[tid]; + if (tctx == 0) + continue; + cb(tctx, arg); + } +} + +u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) { + BlockingMutexLock l(&mtx_); + for (u32 tid = 0; tid < n_contexts_; tid++) { + ThreadContextBase *tctx = threads_[tid]; + if (tctx != 0 && cb(tctx, arg)) + return tctx->tid; + } + return kUnknownTid; +} + +ThreadContextBase * +ThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) { + CheckLocked(); + for (u32 tid = 0; tid < n_contexts_; tid++) { + ThreadContextBase *tctx = threads_[tid]; + if (tctx != 0 && cb(tctx, arg)) + return tctx; + } + return 0; +} + +static bool FindThreadContextByOsIdCallback(ThreadContextBase *tctx, + void *arg) { + return (tctx->os_id == (uptr)arg && tctx->status != ThreadStatusInvalid && + tctx->status != ThreadStatusDead); +} + +ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(uptr os_id) { + return FindThreadContextLocked(FindThreadContextByOsIdCallback, + (void *)os_id); +} + +void ThreadRegistry::SetThreadName(u32 tid, const char *name) { + BlockingMutexLock l(&mtx_); + CHECK_LT(tid, n_contexts_); + ThreadContextBase *tctx = threads_[tid]; + CHECK_NE(tctx, 0); + CHECK_EQ(ThreadStatusRunning, tctx->status); + tctx->SetName(name); +} + +void ThreadRegistry::DetachThread(u32 tid) { + BlockingMutexLock l(&mtx_); + CHECK_LT(tid, n_contexts_); + ThreadContextBase *tctx = threads_[tid]; + CHECK_NE(tctx, 0); + if (tctx->status == ThreadStatusInvalid) { + Report("%s: Detach of non-existent thread\n", SanitizerToolName); + return; + } + if (tctx->status == ThreadStatusFinished) { + tctx->SetDead(); + QuarantinePush(tctx); + } else { + tctx->detached = true; + } +} + +void ThreadRegistry::JoinThread(u32 tid, void *arg) { + BlockingMutexLock l(&mtx_); + CHECK_LT(tid, n_contexts_); + ThreadContextBase *tctx = threads_[tid]; + CHECK_NE(tctx, 0); + if (tctx->status == ThreadStatusInvalid) { + Report("%s: Join of non-existent thread\n", SanitizerToolName); + return; + } + tctx->SetJoined(arg); + QuarantinePush(tctx); +} + +void ThreadRegistry::FinishThread(u32 tid) { + BlockingMutexLock l(&mtx_); + CHECK_GT(alive_threads_, 0); + alive_threads_--; + CHECK_GT(running_threads_, 0); + running_threads_--; + CHECK_LT(tid, n_contexts_); + ThreadContextBase *tctx = threads_[tid]; + CHECK_NE(tctx, 0); + CHECK_EQ(ThreadStatusRunning, tctx->status); + tctx->SetFinished(); + if (tctx->detached) { + tctx->SetDead(); + QuarantinePush(tctx); + } +} + +void ThreadRegistry::StartThread(u32 tid, uptr os_id, void *arg) { + BlockingMutexLock l(&mtx_); + running_threads_++; + CHECK_LT(tid, n_contexts_); + ThreadContextBase *tctx = threads_[tid]; + CHECK_NE(tctx, 0); + CHECK_EQ(ThreadStatusCreated, tctx->status); + tctx->SetStarted(os_id, arg); +} + +void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) { + dead_threads_.push_back(tctx); + if (dead_threads_.size() <= thread_quarantine_size_) + return; + tctx = 
dead_threads_.front(); + dead_threads_.pop_front(); + CHECK_EQ(tctx->status, ThreadStatusDead); + tctx->Reset(); + invalid_threads_.push_back(tctx); +} + +ThreadContextBase *ThreadRegistry::QuarantinePop() { + if (invalid_threads_.size() == 0) + return 0; + ThreadContextBase *tctx = invalid_threads_.front(); + invalid_threads_.pop_front(); + return tctx; +} + +} // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_thread_registry.h b/lib/sanitizer_common/sanitizer_thread_registry.h new file mode 100644 index 000000000000..6072e7c0a002 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_thread_registry.h @@ -0,0 +1,145 @@ +//===-- sanitizer_thread_registry.h -----------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between sanitizer tools. +// +// General thread bookkeeping functionality. +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_THREAD_REGISTRY_H +#define SANITIZER_THREAD_REGISTRY_H + +#include "sanitizer_common.h" +#include "sanitizer_list.h" +#include "sanitizer_mutex.h" + +namespace __sanitizer { + +enum ThreadStatus { + ThreadStatusInvalid, // Non-existent thread, data is invalid. + ThreadStatusCreated, // Created but not yet running. + ThreadStatusRunning, // The thread is currently running. + ThreadStatusFinished, // Joinable thread is finished but not yet joined. + ThreadStatusDead // Joined, but some info is still available. +}; + +// Generic thread context. Specific sanitizer tools may inherit from it. +// If thread is dead, context may optionally be reused for a new thread. +class ThreadContextBase { + public: + explicit ThreadContextBase(u32 tid); + ~ThreadContextBase(); // Should never be called. + + const u32 tid; // Thread ID. Main thread should have tid = 0. + u64 unique_id; // Unique thread ID. + uptr os_id; // PID (used for reporting). + uptr user_id; // Some opaque user thread id (e.g. pthread_t). + char name[64]; // As annotated by user. + + ThreadStatus status; + bool detached; + int reuse_count; + + u32 parent_tid; + ThreadContextBase *next; // For storing thread contexts in a list. + + void SetName(const char *new_name); + + void SetDead(); + void SetJoined(void *arg); + void SetFinished(); + void SetStarted(uptr _os_id, void *arg); + void SetCreated(uptr _user_id, u64 _unique_id, bool _detached, + u32 _parent_tid, void *arg); + void Reset(); + + // The following methods may be overriden by subclasses. + // Some of them take opaque arg that may be optionally be used + // by subclasses. + virtual void OnDead() {} + virtual void OnJoined(void *arg) {} + virtual void OnFinished() {} + virtual void OnStarted(void *arg) {} + virtual void OnCreated(void *arg) {} + virtual void OnReset() {} +}; + +typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid); + +class ThreadRegistry { + public: + static const u32 kUnknownTid; + + ThreadRegistry(ThreadContextFactory factory, u32 max_threads, + u32 thread_quarantine_size); + void GetNumberOfThreads(uptr *total = 0, uptr *running = 0, uptr *alive = 0); + uptr GetMaxAliveThreads(); + + void Lock() { mtx_.Lock(); } + void CheckLocked() { mtx_.CheckLocked(); } + void Unlock() { mtx_.Unlock(); } + + // Should be guarded by ThreadRegistryLock. 
+ ThreadContextBase *GetThreadLocked(u32 tid) { + DCHECK_LT(tid, n_contexts_); + return threads_[tid]; + } + + u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg); + + typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg); + // Invokes callback with a specified arg for each thread context. + // Should be guarded by ThreadRegistryLock. + void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg); + + typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg); + // Finds a thread using the provided callback. Returns kUnknownTid if no + // thread is found. + u32 FindThread(FindThreadCallback cb, void *arg); + // Should be guarded by ThreadRegistryLock. Return 0 if no thread + // is found. + ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb, + void *arg); + ThreadContextBase *FindThreadContextByOsIDLocked(uptr os_id); + + void SetThreadName(u32 tid, const char *name); + void DetachThread(u32 tid); + void JoinThread(u32 tid, void *arg); + void FinishThread(u32 tid); + void StartThread(u32 tid, uptr os_id, void *arg); + + private: + const ThreadContextFactory context_factory_; + const u32 max_threads_; + const u32 thread_quarantine_size_; + + BlockingMutex mtx_; + + u32 n_contexts_; // Number of created thread contexts, + // at most max_threads_. + u64 total_threads_; // Total number of created threads. May be greater than + // max_threads_ if contexts were reused. + uptr alive_threads_; // Created or running. + uptr max_alive_threads_; + uptr running_threads_; + + ThreadContextBase **threads_; // Array of thread contexts is leaked. + IntrusiveList<ThreadContextBase> dead_threads_; + IntrusiveList<ThreadContextBase> invalid_threads_; + + void QuarantinePush(ThreadContextBase *tctx); + ThreadContextBase *QuarantinePop(); +}; + +typedef GenericScopedLock<ThreadRegistry> ThreadRegistryLock; + +} // namespace __sanitizer + +#endif // SANITIZER_THREAD_REGISTRY_H + diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc index 2ae37af8847c..e76f1d1f7fa6 100644 --- a/lib/sanitizer_common/sanitizer_win.cc +++ b/lib/sanitizer_common/sanitizer_win.cc @@ -11,7 +11,10 @@ // run-time libraries and implements windows-specific functions from // sanitizer_libc.h. //===----------------------------------------------------------------------===// -#ifdef _WIN32 + +#include "sanitizer_platform.h" +#if SANITIZER_WINDOWS + #define WIN32_LEAN_AND_MEAN #define NOGDI #include <stdlib.h> @@ -20,11 +23,14 @@ #include "sanitizer_common.h" #include "sanitizer_libc.h" -#include "sanitizer_placement_new.h" #include "sanitizer_mutex.h" +#include "sanitizer_placement_new.h" +#include "sanitizer_stacktrace.h" namespace __sanitizer { +#include "sanitizer_syscall_generic.inc" + // --------------------- sanitizer_common.h uptr GetPageSize() { return 1U << 14; // FIXME: is this configurable? @@ -38,14 +44,20 @@ bool FileExists(const char *filename) { UNIMPLEMENTED(); } -int GetPid() { +uptr internal_getpid() { return GetProcessId(GetCurrentProcess()); } -uptr GetThreadSelf() { +// In contrast to POSIX, on Windows GetCurrentThreadId() +// returns a system-unique identifier. 
+uptr GetTid() { return GetCurrentThreadId(); } +uptr GetThreadSelf() { + return GetTid(); +} + void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, uptr *stack_bottom) { CHECK(stack_top); @@ -97,6 +109,11 @@ void *Mprotect(uptr fixed_addr, uptr size) { MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS); } +void FlushUnneededShadowMemory(uptr addr, uptr size) { + // This is almost useless on 32-bits. + // FIXME: add madvice-analog when we move to 64-bits. +} + bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { // FIXME: shall we do anything here on Windows? return true; @@ -106,19 +123,38 @@ void *MapFileToMemory(const char *file_name, uptr *buff_size) { UNIMPLEMENTED(); } -const char *GetEnv(const char *name) { - static char env_buffer[32767] = {}; +static const int kMaxEnvNameLength = 128; +static const int kMaxEnvValueLength = 32767; - // Note: this implementation stores the result in a static buffer so we only - // allow it to be called just once. - static bool called_once = false; - if (called_once) - UNIMPLEMENTED(); - called_once = true; +namespace { + +struct EnvVariable { + char name[kMaxEnvNameLength]; + char value[kMaxEnvValueLength]; +}; - DWORD rv = GetEnvironmentVariableA(name, env_buffer, sizeof(env_buffer)); - if (rv > 0 && rv < sizeof(env_buffer)) - return env_buffer; +} // namespace + +static const int kEnvVariables = 5; +static EnvVariable env_vars[kEnvVariables]; +static int num_env_vars; + +const char *GetEnv(const char *name) { + // Note: this implementation caches the values of the environment variables + // and limits their quantity. + for (int i = 0; i < num_env_vars; i++) { + if (0 == internal_strcmp(name, env_vars[i].name)) + return env_vars[i].value; + } + CHECK_LT(num_env_vars, kEnvVariables); + DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value, + kMaxEnvValueLength); + if (rv > 0 && rv < kMaxEnvValueLength) { + CHECK_LT(internal_strlen(name), kMaxEnvNameLength); + internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength); + num_env_vars++; + return env_vars[num_env_vars - 1].value; + } return 0; } @@ -126,6 +162,10 @@ const char *GetPwd() { UNIMPLEMENTED(); } +u32 GetUid() { + UNIMPLEMENTED(); +} + void DumpProcessMap() { UNIMPLEMENTED(); } @@ -158,10 +198,6 @@ void SleepForMillis(int millis) { Sleep(millis); } -void Exit(int exitcode) { - _exit(exitcode); -} - void Abort() { abort(); _exit(-1); // abort is not NORETURN on Windows. 
@@ -174,16 +210,16 @@ int Atexit(void (*function)(void)) { #endif // ------------------ sanitizer_libc.h -void *internal_mmap(void *addr, uptr length, int prot, int flags, - int fd, u64 offset) { +uptr internal_mmap(void *addr, uptr length, int prot, int flags, + int fd, u64 offset) { UNIMPLEMENTED(); } -int internal_munmap(void *addr, uptr length) { +uptr internal_munmap(void *addr, uptr length) { UNIMPLEMENTED(); } -int internal_close(fd_t fd) { +uptr internal_close(fd_t fd) { UNIMPLEMENTED(); } @@ -191,7 +227,15 @@ int internal_isatty(fd_t fd) { return _isatty(fd); } -fd_t internal_open(const char *filename, bool write) { +uptr internal_open(const char *filename, int flags) { + UNIMPLEMENTED(); +} + +uptr internal_open(const char *filename, int flags, u32 mode) { + UNIMPLEMENTED(); +} + +uptr OpenFile(const char *filename, bool write) { UNIMPLEMENTED(); } @@ -211,11 +255,23 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) { return ret; } +uptr internal_stat(const char *path, void *buf) { + UNIMPLEMENTED(); +} + +uptr internal_lstat(const char *path, void *buf) { + UNIMPLEMENTED(); +} + +uptr internal_fstat(fd_t fd, void *buf) { + UNIMPLEMENTED(); +} + uptr internal_filesize(fd_t fd) { UNIMPLEMENTED(); } -int internal_dup2(int oldfd, int newfd) { +uptr internal_dup2(int oldfd, int newfd) { UNIMPLEMENTED(); } @@ -223,16 +279,18 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) { UNIMPLEMENTED(); } -int internal_sched_yield() { +uptr internal_sched_yield() { Sleep(0); return 0; } +void internal__exit(int exitcode) { + _exit(exitcode); +} + // ---------------------- BlockingMutex ---------------- {{{1 -enum LockState { - LOCK_UNINITIALIZED = 0, - LOCK_READY = -1, -}; +const uptr LOCK_UNINITIALIZED = 0; +const uptr LOCK_READY = (uptr)-1; BlockingMutex::BlockingMutex(LinkerInitialized li) { // FIXME: see comments in BlockingMutex::Lock() for the details. @@ -243,6 +301,12 @@ BlockingMutex::BlockingMutex(LinkerInitialized li) { owner_ = LOCK_READY; } +BlockingMutex::BlockingMutex() { + CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_)); + InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_); + owner_ = LOCK_READY; +} + void BlockingMutex::Lock() { if (owner_ == LOCK_UNINITIALIZED) { // FIXME: hm, global BlockingMutex objects are not initialized?!? @@ -254,16 +318,64 @@ void BlockingMutex::Lock() { // locks while we're starting in one thread to avoid double-init races. } EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_); - CHECK(owner_ == LOCK_READY); + CHECK_EQ(owner_, LOCK_READY); owner_ = GetThreadSelf(); } void BlockingMutex::Unlock() { - CHECK(owner_ == GetThreadSelf()); + CHECK_EQ(owner_, GetThreadSelf()); owner_ = LOCK_READY; LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_); } +void BlockingMutex::CheckLocked() { + CHECK_EQ(owner_, GetThreadSelf()); +} + +uptr GetTlsSize() { + return 0; +} + +void InitTlsSize() { +} + +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { + uptr stack_top, stack_bottom; + GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); + *stk_addr = stack_bottom; + *stk_size = stack_top - stack_bottom; + *tls_addr = 0; + *tls_size = 0; +} + +void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, + uptr stack_top, uptr stack_bottom, bool fast) { + (void)fast; + (void)stack_top; + (void)stack_bottom; + stack->max_size = max_s; + void *tmp[kStackTraceMax]; + + // FIXME: CaptureStackBackTrace might be too slow for us. 
+ // FIXME: Compare with StackWalk64. + // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc + uptr cs_ret = CaptureStackBackTrace(1, stack->max_size, tmp, 0); + uptr offset = 0; + // Skip the RTL frames by searching for the PC in the stacktrace. + // FIXME: this doesn't work well for the malloc/free stacks yet. + for (uptr i = 0; i < cs_ret; i++) { + if (pc != (uptr)tmp[i]) + continue; + offset = i; + break; + } + + stack->size = cs_ret - offset; + for (uptr i = 0; i < stack->size; i++) + stack->trace[i] = (uptr)tmp[i + offset]; +} + } // namespace __sanitizer #endif // _WIN32 diff --git a/lib/sanitizer_common/scripts/check_lint.sh b/lib/sanitizer_common/scripts/check_lint.sh index e65794df0ce7..3240f6f18cee 100755 --- a/lib/sanitizer_common/scripts/check_lint.sh +++ b/lib/sanitizer_common/scripts/check_lint.sh @@ -13,20 +13,27 @@ fi # Cpplint setup cd ${SCRIPT_DIR} if [ ! -d cpplint ]; then - svn co -r83 http://google-styleguide.googlecode.com/svn/trunk/cpplint cpplint + svn co http://google-styleguide.googlecode.com/svn/trunk/cpplint cpplint +else + (cd cpplint && svn up) fi CPPLINT=${SCRIPT_DIR}/cpplint/cpplint.py # Filters # TODO: remove some of these filters -ASAN_RTL_LINT_FILTER=-readability/casting,-readability/check,-build/include,-build/header_guard,-build/class,-legal/copyright,-build/namespaces -ASAN_TEST_LINT_FILTER=-readability/casting,-build/include,-legal/copyright,-whitespace/newline,-runtime/sizeof,-runtime/int,-runtime/printf,-build/header_guard +COMMON_LINT_FILTER=-build/include,-build/header_guard,-legal/copyright,-whitespace/comments,-readability/casting,\ +-build/namespaces +ASAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int +ASAN_TEST_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/sizeof,-runtime/int,-runtime/printf ASAN_LIT_TEST_LINT_FILTER=${ASAN_TEST_LINT_FILTER},-whitespace/line_length -TSAN_RTL_LINT_FILTER=-legal/copyright,-build/include,-readability/casting,-build/header_guard,-build/namespaces +TSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER} TSAN_TEST_LINT_FILTER=${TSAN_RTL_LINT_FILTER},-runtime/threadsafe_fn,-runtime/int TSAN_LIT_TEST_LINT_FILTER=${TSAN_TEST_LINT_FILTER},-whitespace/line_length -MSAN_RTL_LINT_FILTER=-legal/copyright,-build/include,-readability/casting,-build/header_guard,-build/namespaces -TSAN_RTL_INC_LINT_FILTER=${TSAN_TEST_LINT_FILTER},-runtime/sizeof +MSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER} +LSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER} +LSAN_LIT_TEST_LINT_FILTER=${LSAN_RTL_LINT_FILTER},-whitespace/line_length +COMMON_RTL_INC_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int,-runtime/sizeof,-runtime/printf +SANITIZER_INCLUDES_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int cd ${LLVM_CHECKOUT} @@ -40,12 +47,12 @@ COMPILER_RT=projects/compiler-rt # Headers SANITIZER_INCLUDES=${COMPILER_RT}/include/sanitizer -${CPPLINT} --filter=${TSAN_RTL_LINT_FILTER} ${SANITIZER_INCLUDES}/*.h +${CPPLINT} --filter=${SANITIZER_INCLUDES_LINT_FILTER} ${SANITIZER_INCLUDES}/*.h # Sanitizer_common COMMON_RTL=${COMPILER_RT}/lib/sanitizer_common -${CPPLINT} --filter=${ASAN_RTL_LINT_FILTER} ${COMMON_RTL}/*.{cc,h} -${CPPLINT} --filter=${TSAN_RTL_LINT_FILTER} ${COMMON_RTL}/tests/*.cc +${CPPLINT} --filter=${COMMON_RTL_INC_LINT_FILTER} ${COMMON_RTL}/*.{cc,h} +${CPPLINT} --filter=${COMMON_RTL_INC_LINT_FILTER} ${COMMON_RTL}/tests/*.cc # Interception INTERCEPTION=${COMPILER_RT}/lib/interception @@ -69,6 +76,12 @@ ${CPPLINT} --filter=${TSAN_LIT_TEST_LINT_FILTER} ${TSAN_RTL}/lit_tests/*.cc MSAN_RTL=${COMPILER_RT}/lib/msan ${CPPLINT} --filter=${MSAN_RTL_LINT_FILTER} 
${MSAN_RTL}/*.{cc,h} +# LSan +LSAN_RTL=${COMPILER_RT}/lib/lsan +${CPPLINT} --filter=${LSAN_RTL_LINT_FILTER} ${LSAN_RTL}/*.{cc,h} +${CPPLINT} --filter=${LSAN_RTL_LINT_FILTER} ${LSAN_RTL}/tests/*.{cc,h} +${CPPLINT} --filter=${LSAN_LIT_TEST_LINT_FILTER} ${LSAN_RTL}/lit_tests/*.{cc,h} + set +e # Misc files @@ -77,6 +90,6 @@ for FILE in $FILES; do TMPFILE=$(mktemp -u ${FILE}.XXXXX).cc echo "Checking $FILE" cp -f $FILE $TMPFILE && \ - ${CPPLINT} --filter=${TSAN_RTL_INC_LINT_FILTER} $TMPFILE + ${CPPLINT} --filter=${COMMON_RTL_INC_LINT_FILTER} $TMPFILE rm $TMPFILE done diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt index f83a89cbe37c..25e57507ad14 100644 --- a/lib/sanitizer_common/tests/CMakeLists.txt +++ b/lib/sanitizer_common/tests/CMakeLists.txt @@ -2,15 +2,20 @@ include(CompilerRTCompile) set(SANITIZER_UNITTESTS sanitizer_allocator_test.cc + sanitizer_atomic_test.cc sanitizer_common_test.cc sanitizer_flags_test.cc sanitizer_libc_test.cc + sanitizer_linux_test.cc sanitizer_list_test.cc sanitizer_mutex_test.cc sanitizer_printf_test.cc sanitizer_scanf_interceptor_test.cc sanitizer_stackdepot_test.cc + sanitizer_stacktrace_test.cc + sanitizer_stoptheworld_test.cc sanitizer_test_main.cc + sanitizer_thread_registry_test.cc ) set(SANITIZER_TEST_HEADERS) @@ -18,6 +23,18 @@ foreach(header ${SANITIZER_HEADERS}) list(APPEND SANITIZER_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header}) endforeach() +set(SANITIZER_TEST_CFLAGS_COMMON + ${COMPILER_RT_GTEST_INCLUDE_CFLAGS} + -I${COMPILER_RT_SOURCE_DIR}/include + -I${COMPILER_RT_SOURCE_DIR}/lib + -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common + -DGTEST_HAS_RTTI=0 + -O2 -g -fno-rtti + -Wall -Werror -Werror=sign-compare) + +set(SANITIZER_TEST_LINK_FLAGS_COMMON + -lstdc++ -ldl) + include_directories(..) include_directories(../..) 
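[Editor's aside, not part of the commit: the hunk above registers sanitizer_thread_registry_test.cc (among other new unit tests) with the build. Purely to illustrate the ThreadRegistry API added earlier in this diff, and not the contents of that test file, a lifecycle exercise might look like the sketch below; the factory, thread counts and expectations are assumptions.]

// Illustrative sketch only; the real sanitizer_thread_registry_test.cc in this
// commit may differ. It exercises the ThreadRegistry interface declared in
// sanitizer_thread_registry.h above.
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "gtest/gtest.h"

namespace __sanitizer {

static ThreadContextBase *SketchContextFactory(u32 tid) {
  // Never deleted: ThreadContextBase's destructor is deliberately unreachable.
  return new ThreadContextBase(tid);
}

TEST(SanitizerCommon, ThreadRegistryLifecycleSketch) {
  ThreadRegistry registry(SketchContextFactory, /*max_threads=*/16,
                          /*thread_quarantine_size=*/4);
  u32 tid = registry.CreateThread(/*user_id=*/0, /*detached=*/false,
                                  /*parent_tid=*/0, /*arg=*/0);
  registry.StartThread(tid, /*os_id=*/100, /*arg=*/0);
  uptr total, running, alive;
  registry.GetNumberOfThreads(&total, &running, &alive);
  EXPECT_EQ(1U, total);
  EXPECT_EQ(1U, running);
  EXPECT_EQ(1U, alive);
  registry.FinishThread(tid);
  registry.JoinThread(tid, /*arg=*/0);
  registry.GetNumberOfThreads(&total, &running, &alive);
  EXPECT_EQ(0U, running);
  EXPECT_EQ(0U, alive);
}

}  // namespace __sanitizer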
@@ -49,18 +66,12 @@ macro(add_sanitizer_tests_for_arch arch) get_target_flags_for_arch(${arch} TARGET_FLAGS) set(SANITIZER_TEST_SOURCES ${SANITIZER_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE}) - set(SANITIZER_TEST_CFLAGS ${COMPILER_RT_GTEST_INCLUDE_CFLAGS} - -I${COMPILER_RT_SOURCE_DIR}/include - -I${COMPILER_RT_SOURCE_DIR}/lib - -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common - -O2 -g ${TARGET_FLAGS}) - set(SANITIZER_TEST_LINK_FLAGS -lstdc++ -lpthread ${TARGET_FLAGS}) set(SANITIZER_TEST_OBJECTS) foreach(source ${SANITIZER_TEST_SOURCES}) get_filename_component(basename ${source} NAME) set(output_obj "${basename}.${arch}.o") clang_compile(${output_obj} ${source} - CFLAGS ${SANITIZER_TEST_CFLAGS} + CFLAGS ${SANITIZER_TEST_CFLAGS_COMMON} ${TARGET_FLAGS} DEPS gtest ${SANITIZER_RUNTIME_LIBRARIES} ${SANITIZER_TEST_HEADERS}) list(APPEND SANITIZER_TEST_OBJECTS ${output_obj}) @@ -73,7 +84,8 @@ macro(add_sanitizer_tests_for_arch arch) OBJECTS ${SANITIZER_TEST_OBJECTS} ${SANITIZER_COMMON_LIB_NAME} DEPS ${SANITIZER_TEST_OBJECTS} ${SANITIZER_COMMON_LIB} - LINK_FLAGS ${SANITIZER_TEST_LINK_FLAGS}) + LINK_FLAGS ${SANITIZER_TEST_LINK_FLAGS_COMMON} + -lpthread ${TARGET_FLAGS}) endmacro() if(COMPILER_RT_CAN_EXECUTE_TESTS) @@ -85,11 +97,13 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS) else() if(CAN_TARGET_x86_64) add_sanitizer_common_lib("RTSanitizerCommon.test.x86_64" - $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>) + $<TARGET_OBJECTS:RTSanitizerCommon.x86_64> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.x86_64>) endif() if(CAN_TARGET_i386) add_sanitizer_common_lib("RTSanitizerCommon.test.i386" - $<TARGET_OBJECTS:RTSanitizerCommon.i386>) + $<TARGET_OBJECTS:RTSanitizerCommon.i386> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.i386>) endif() endif() if(CAN_TARGET_x86_64) @@ -118,21 +132,14 @@ if(ANDROID) add_executable(SanitizerTest ${SANITIZER_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE} - $<TARGET_OBJECTS:RTSanitizerCommon.arm.android> - ) + $<TARGET_OBJECTS:RTSanitizerCommon.arm.android>) set_target_compile_flags(SanitizerTest ${SANITIZER_COMMON_CFLAGS} - ${COMPILER_RT_GTEST_INCLUDE_CFLAGS} - -I${COMPILER_RT_SOURCE_DIR}/include - -I${COMPILER_RT_SOURCE_DIR}/lib - -I${COMPILER_RT_SOURCE_DIR}/lib/sanitizer_common - -O2 -g - ) + ${SANITIZER_TEST_CFLAGS_COMMON}) # Setup correct output directory and link flags. - get_unittest_directory(OUTPUT_DIR) set_target_properties(SanitizerTest PROPERTIES - RUNTIME_OUTPUT_DIRECTORY ${OUTPUT_DIR}) - set_target_link_flags(SanitizerTest ${SANITIZER_TEST_LINK_FLAGS}) + RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + set_target_link_flags(SanitizerTest ${SANITIZER_TEST_LINK_FLAGS_COMMON}) # Add unit test to test suite. add_dependencies(SanitizerUnitTests SanitizerTest) endif() diff --git a/lib/sanitizer_common/tests/lit.cfg b/lib/sanitizer_common/tests/lit.cfg index d774753985ac..303d56c91079 100644 --- a/lib/sanitizer_common/tests/lit.cfg +++ b/lib/sanitizer_common/tests/lit.cfg @@ -11,9 +11,8 @@ def get_required_attr(config, attr_name): return attr_value # Setup attributes common for all compiler-rt projects. 
-llvm_src_root = get_required_attr(config, 'llvm_src_root') -compiler_rt_lit_unit_cfg = os.path.join(llvm_src_root, "projects", - "compiler-rt", "lib", +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib", "lit.common.unit.cfg") lit.load_config(config, compiler_rt_lit_unit_cfg) diff --git a/lib/sanitizer_common/tests/lit.site.cfg.in b/lib/sanitizer_common/tests/lit.site.cfg.in index bb9a28d6a6cb..50485aa16ec2 100644 --- a/lib/sanitizer_common/tests/lit.site.cfg.in +++ b/lib/sanitizer_common/tests/lit.site.cfg.in @@ -1,9 +1,16 @@ ## Autogenerated by LLVM/Clang configuration. # Do not edit! -config.build_type = "@CMAKE_BUILD_TYPE@" config.llvm_obj_root = "@LLVM_BINARY_DIR@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" +config.llvm_build_mode = "@LLVM_BUILD_MODE@" + +try: + config.llvm_build_mode = config.llvm_build_mode % lit.params +except KeyError,e: + key, = e.args + lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) # Let the main config do the real work. lit.load_config(config, "@CMAKE_CURRENT_SOURCE_DIR@/lit.cfg") diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc index d67f4636ef4f..de949ca7defe 100644 --- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc @@ -22,6 +22,7 @@ #include <pthread.h> #include <algorithm> #include <vector> +#include <set> // Too slow for debug build #if TSAN_DEBUG == 0 @@ -40,8 +41,16 @@ typedef SizeClassAllocator64< static const u64 kAddressSpaceSize = 1ULL << 32; #endif +static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24); +static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog; + typedef SizeClassAllocator32< - 0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact; + 0, kAddressSpaceSize, + /*kMetadataSize*/16, + CompactSizeClassMap, + kRegionSizeLog, + FlatByteMap<kFlatByteMapSize> > + Allocator32Compact; template <class SizeClassMap> void TestSizeClassMap() { @@ -63,7 +72,8 @@ void TestSizeClassAllocator() { Allocator *a = new Allocator; a->Init(); SizeClassAllocatorLocalCache<Allocator> cache; - cache.Init(); + memset(&cache, 0, sizeof(cache)); + cache.Init(0); static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000, 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000}; @@ -77,7 +87,7 @@ void TestSizeClassAllocator() { uptr size = sizes[s]; if (!a->CanAllocate(size, 1)) continue; // printf("s = %ld\n", size); - uptr n_iter = std::max((uptr)6, 10000000 / size); + uptr n_iter = std::max((uptr)6, 8000000 / size); // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter); for (uptr i = 0; i < n_iter; i++) { uptr class_id0 = Allocator::SizeClassMapT::ClassID(size); @@ -114,6 +124,12 @@ void TestSizeClassAllocator() { CHECK_EQ(last_total_allocated, total_allocated); } + // Check that GetBlockBegin never crashes. 
+ for (uptr x = 0, step = kAddressSpaceSize / 100000; + x < kAddressSpaceSize - step; x += step) + if (a->PointerIsMine(reinterpret_cast<void *>(x))) + Ident(a->GetBlockBegin(reinterpret_cast<void *>(x))); + a->TestOnlyUnmap(); delete a; } @@ -137,25 +153,28 @@ void SizeClassAllocatorMetadataStress() { Allocator *a = new Allocator; a->Init(); SizeClassAllocatorLocalCache<Allocator> cache; - cache.Init(); - static volatile void *sink; + memset(&cache, 0, sizeof(cache)); + cache.Init(0); - const uptr kNumAllocs = 10000; + const uptr kNumAllocs = 1 << 13; void *allocated[kNumAllocs]; + void *meta[kNumAllocs]; for (uptr i = 0; i < kNumAllocs; i++) { void *x = cache.Allocate(a, 1 + i % 50); allocated[i] = x; + meta[i] = a->GetMetaData(x); } // Get Metadata kNumAllocs^2 times. for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) { - sink = a->GetMetaData(allocated[i % kNumAllocs]); + uptr idx = i % kNumAllocs; + void *m = a->GetMetaData(allocated[idx]); + EXPECT_EQ(m, meta[idx]); } for (uptr i = 0; i < kNumAllocs; i++) { cache.Deallocate(a, 1 + i % 50, allocated[i]); } a->TestOnlyUnmap(); - (void)sink; delete a; } @@ -167,11 +186,47 @@ TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) { TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) { SizeClassAllocatorMetadataStress<Allocator64Compact>(); } -#endif +#endif // SANITIZER_WORDSIZE == 64 TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) { SizeClassAllocatorMetadataStress<Allocator32Compact>(); } +template <class Allocator> +void SizeClassAllocatorGetBlockBeginStress() { + Allocator *a = new Allocator; + a->Init(); + SizeClassAllocatorLocalCache<Allocator> cache; + memset(&cache, 0, sizeof(cache)); + cache.Init(0); + + uptr max_size_class = Allocator::kNumClasses - 1; + uptr size = Allocator::SizeClassMapT::Size(max_size_class); + u64 G8 = 1ULL << 33; + // Make sure we correctly compute GetBlockBegin() w/o overflow. + for (size_t i = 0; i <= G8 / size; i++) { + void *x = cache.Allocate(a, max_size_class); + void *beg = a->GetBlockBegin(x); + // if ((i & (i - 1)) == 0) + // fprintf(stderr, "[%zd] %p %p\n", i, x, beg); + EXPECT_EQ(x, beg); + } + + a->TestOnlyUnmap(); + delete a; +} + +#if SANITIZER_WORDSIZE == 64 +TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) { + SizeClassAllocatorGetBlockBeginStress<Allocator64>(); +} +TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) { + SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(); +} +TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) { + SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>(); +} +#endif // SANITIZER_WORDSIZE == 64 + struct TestMapUnmapCallback { static int map_count, unmap_count; void OnMap(uptr p, uptr size) const { map_count++; } @@ -191,8 +246,11 @@ TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) { a->Init(); EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state. SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache; - cache.Init(); - a->AllocateBatch(&cache, 64); + memset(&cache, 0, sizeof(cache)); + cache.Init(0); + AllocatorStats stats; + stats.Init(); + a->AllocateBatch(&stats, &cache, 32); EXPECT_EQ(TestMapUnmapCallback::map_count, 3); // State + alloc + metadata. a->TestOnlyUnmap(); EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing. 
@@ -204,17 +262,25 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) { TestMapUnmapCallback::map_count = 0; TestMapUnmapCallback::unmap_count = 0; typedef SizeClassAllocator32< - 0, kAddressSpaceSize, 16, CompactSizeClassMap, - TestMapUnmapCallback> Allocator32WithCallBack; + 0, kAddressSpaceSize, + /*kMetadataSize*/16, + CompactSizeClassMap, + kRegionSizeLog, + FlatByteMap<kFlatByteMapSize>, + TestMapUnmapCallback> + Allocator32WithCallBack; Allocator32WithCallBack *a = new Allocator32WithCallBack; a->Init(); - EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state. + EXPECT_EQ(TestMapUnmapCallback::map_count, 0); SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache; - cache.Init(); - a->AllocateBatch(&cache, 64); - EXPECT_EQ(TestMapUnmapCallback::map_count, 2); // alloc. + memset(&cache, 0, sizeof(cache)); + cache.Init(0); + AllocatorStats stats; + stats.Init(); + a->AllocateBatch(&stats, &cache, 32); + EXPECT_EQ(TestMapUnmapCallback::map_count, 1); a->TestOnlyUnmap(); - EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2); // The whole thing + alloc. + EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); delete a; // fprintf(stderr, "Map: %d Unmap: %d\n", // TestMapUnmapCallback::map_count, @@ -226,9 +292,11 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) { TestMapUnmapCallback::unmap_count = 0; LargeMmapAllocator<TestMapUnmapCallback> a; a.Init(); - void *x = a.Allocate(1 << 20, 1); + AllocatorStats stats; + stats.Init(); + void *x = a.Allocate(&stats, 1 << 20, 1); EXPECT_EQ(TestMapUnmapCallback::map_count, 1); - a.Deallocate(x); + a.Deallocate(&stats, x); EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); } @@ -237,9 +305,12 @@ void FailInAssertionOnOOM() { Allocator a; a.Init(); SizeClassAllocatorLocalCache<Allocator> cache; - cache.Init(); + memset(&cache, 0, sizeof(cache)); + cache.Init(0); + AllocatorStats stats; + stats.Init(); for (int i = 0; i < 1000000; i++) { - a.AllocateBatch(&cache, 64); + a.AllocateBatch(&stats, &cache, 52); } a.TestOnlyUnmap(); @@ -254,13 +325,15 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) { TEST(SanitizerCommon, LargeMmapAllocator) { LargeMmapAllocator<> a; a.Init(); + AllocatorStats stats; + stats.Init(); static const int kNumAllocs = 1000; char *allocated[kNumAllocs]; static const uptr size = 4000; // Allocate some. for (int i = 0; i < kNumAllocs; i++) { - allocated[i] = (char *)a.Allocate(size, 1); + allocated[i] = (char *)a.Allocate(&stats, size, 1); CHECK(a.PointerIsMine(allocated[i])); } // Deallocate all. @@ -268,14 +341,14 @@ TEST(SanitizerCommon, LargeMmapAllocator) { for (int i = 0; i < kNumAllocs; i++) { char *p = allocated[i]; CHECK(a.PointerIsMine(p)); - a.Deallocate(p); + a.Deallocate(&stats, p); } // Check that non left. CHECK_EQ(a.TotalMemoryUsed(), 0); // Allocate some more, also add metadata. 
for (int i = 0; i < kNumAllocs; i++) { - char *x = (char *)a.Allocate(size, 1); + char *x = (char *)a.Allocate(&stats, size, 1); CHECK_GE(a.GetActuallyAllocatedSize(x), size); uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x)); *meta = i; @@ -294,7 +367,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) { uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p)); CHECK_EQ(*meta, idx); CHECK(a.PointerIsMine(p)); - a.Deallocate(p); + a.Deallocate(&stats, p); } CHECK_EQ(a.TotalMemoryUsed(), 0); @@ -304,7 +377,7 @@ TEST(SanitizerCommon, LargeMmapAllocator) { const uptr kNumAlignedAllocs = 100; for (uptr i = 0; i < kNumAlignedAllocs; i++) { uptr size = ((i % 10) + 1) * 4096; - char *p = allocated[i] = (char *)a.Allocate(size, alignment); + char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment); CHECK_EQ(p, a.GetBlockBegin(p)); CHECK_EQ(p, a.GetBlockBegin(p + size - 1)); CHECK_EQ(p, a.GetBlockBegin(p + size / 2)); @@ -312,9 +385,17 @@ TEST(SanitizerCommon, LargeMmapAllocator) { p[0] = p[size - 1] = 0; } for (uptr i = 0; i < kNumAlignedAllocs; i++) { - a.Deallocate(allocated[i]); + a.Deallocate(&stats, allocated[i]); } } + + // Regression test for boundary condition in GetBlockBegin(). + uptr page_size = GetPageSizeCached(); + char *p = (char *)a.Allocate(&stats, page_size, 1); + CHECK_EQ(p, a.GetBlockBegin(p)); + CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1)); + CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size)); + a.Deallocate(&stats, p); } template @@ -327,7 +408,8 @@ void TestCombinedAllocator() { a->Init(); AllocatorCache cache; - cache.Init(); + memset(&cache, 0, sizeof(cache)); + a->InitCache(&cache); EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0); EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0); @@ -363,6 +445,7 @@ void TestCombinedAllocator() { allocated.clear(); a->SwallowCache(&cache); } + a->DestroyCache(&cache); a->TestOnlyUnmap(); } @@ -388,14 +471,13 @@ TEST(SanitizerCommon, CombinedAllocator32Compact) { template <class AllocatorCache> void TestSizeClassAllocatorLocalCache() { - static AllocatorCache static_allocator_cache; - static_allocator_cache.Init(); AllocatorCache cache; typedef typename AllocatorCache::Allocator Allocator; Allocator *a = new Allocator(); a->Init(); - cache.Init(); + memset(&cache, 0, sizeof(cache)); + cache.Init(0); const uptr kNumAllocs = 10000; const int kNumIter = 100; @@ -466,6 +548,42 @@ TEST(SanitizerCommon, AllocatorLeakTest) { a.TestOnlyUnmap(); } + +// Struct which is allocated to pass info to new threads. The new thread frees +// it. +struct NewThreadParams { + AllocatorCache *thread_cache; + AllocatorCache::Allocator *allocator; + uptr class_id; +}; + +// Called in a new thread. Just frees its argument. +static void *DeallocNewThreadWorker(void *arg) { + NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg); + params->thread_cache->Deallocate(params->allocator, params->class_id, params); + return NULL; +} + +// The allocator cache is supposed to be POD and zero initialized. We should be +// able to call Deallocate on a zeroed cache, and it will self-initialize. 
+TEST(Allocator, AllocatorCacheDeallocNewThread) { + AllocatorCache::Allocator allocator; + allocator.Init(); + AllocatorCache main_cache; + AllocatorCache child_cache; + memset(&main_cache, 0, sizeof(main_cache)); + memset(&child_cache, 0, sizeof(child_cache)); + + uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams)); + NewThreadParams *params = reinterpret_cast<NewThreadParams*>( + main_cache.Allocate(&allocator, class_id)); + params->thread_cache = &child_cache; + params->allocator = &allocator; + params->class_id = class_id; + pthread_t t; + EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params)); + EXPECT_EQ(0, pthread_join(t, 0)); +} #endif TEST(Allocator, Basic) { @@ -507,4 +625,122 @@ TEST(Allocator, ScopedBuffer) { } } +class IterationTestCallback { + public: + explicit IterationTestCallback(std::set<void *> *chunks) + : chunks_(chunks) {} + void operator()(void *chunk) const { + chunks_->insert(chunk); + } + private: + std::set<void *> *chunks_; +}; + +template <class Allocator> +void TestSizeClassAllocatorIteration() { + Allocator *a = new Allocator; + a->Init(); + SizeClassAllocatorLocalCache<Allocator> cache; + memset(&cache, 0, sizeof(cache)); + cache.Init(0); + + static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000, + 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000}; + + std::vector<void *> allocated; + + // Allocate a bunch of chunks. + for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) { + uptr size = sizes[s]; + if (!a->CanAllocate(size, 1)) continue; + // printf("s = %ld\n", size); + uptr n_iter = std::max((uptr)6, 80000 / size); + // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter); + for (uptr j = 0; j < n_iter; j++) { + uptr class_id0 = Allocator::SizeClassMapT::ClassID(size); + void *x = cache.Allocate(a, class_id0); + allocated.push_back(x); + } + } + + std::set<void *> reported_chunks; + IterationTestCallback callback(&reported_chunks); + a->ForceLock(); + a->ForEachChunk(callback); + a->ForceUnlock(); + + for (uptr i = 0; i < allocated.size(); i++) { + // Don't use EXPECT_NE. Reporting the first mismatch is enough. + ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end()); + } + + a->TestOnlyUnmap(); + delete a; +} + +#if SANITIZER_WORDSIZE == 64 +TEST(SanitizerCommon, SizeClassAllocator64Iteration) { + TestSizeClassAllocatorIteration<Allocator64>(); +} +#endif + +TEST(SanitizerCommon, SizeClassAllocator32Iteration) { + TestSizeClassAllocatorIteration<Allocator32Compact>(); +} + +TEST(SanitizerCommon, LargeMmapAllocatorIteration) { + LargeMmapAllocator<> a; + a.Init(); + AllocatorStats stats; + stats.Init(); + + static const uptr kNumAllocs = 1000; + char *allocated[kNumAllocs]; + static const uptr size = 40; + // Allocate some. + for (uptr i = 0; i < kNumAllocs; i++) { + allocated[i] = (char *)a.Allocate(&stats, size, 1); + } + + std::set<void *> reported_chunks; + IterationTestCallback callback(&reported_chunks); + a.ForceLock(); + a.ForEachChunk(callback); + a.ForceUnlock(); + + for (uptr i = 0; i < kNumAllocs; i++) { + // Don't use EXPECT_NE. Reporting the first mismatch is enough. + ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end()); + } +} + +#if SANITIZER_WORDSIZE == 64 +// Regression test for out-of-memory condition in PopulateFreeList(). +TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) { + // In a world where regions are small and chunks are huge... 
+ typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap; + typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0, + SpecialSizeClassMap> SpecialAllocator64; + const uptr kRegionSize = + kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded; + SpecialAllocator64 *a = new SpecialAllocator64; + a->Init(); + SizeClassAllocatorLocalCache<SpecialAllocator64> cache; + memset(&cache, 0, sizeof(cache)); + cache.Init(0); + + // ...one man is on a mission to overflow a region with a series of + // successive allocations. + const uptr kClassID = 107; + const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID); + ASSERT_LT(2 * kAllocationSize, kRegionSize); + ASSERT_GT(3 * kAllocationSize, kRegionSize); + cache.Allocate(a, kClassID); + EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID), + "The process has exhausted"); + a->TestOnlyUnmap(); + delete a; +} +#endif + #endif // #if TSAN_DEBUG==0 diff --git a/lib/sanitizer_common/tests/sanitizer_atomic_test.cc b/lib/sanitizer_common/tests/sanitizer_atomic_test.cc new file mode 100644 index 000000000000..a4a97c43e00f --- /dev/null +++ b/lib/sanitizer_common/tests/sanitizer_atomic_test.cc @@ -0,0 +1,55 @@ +//===-- sanitizer_atomic_test.cc ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer/AddressSanitizer runtime. +// +//===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_atomic.h" +#include "gtest/gtest.h" + +namespace __sanitizer { + +// Clang crashes while compiling this test for Android: +// http://llvm.org/bugs/show_bug.cgi?id=15587 +#if !SANITIZER_ANDROID +template<typename T> +void CheckAtomicCompareExchange() { + typedef typename T::Type Type; + { + Type old_val = 42; + Type new_val = 24; + Type var = old_val; + EXPECT_TRUE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val, + memory_order_relaxed)); + EXPECT_FALSE(atomic_compare_exchange_strong((T*)&var, &old_val, new_val, + memory_order_relaxed)); + EXPECT_EQ(new_val, old_val); + } + { + Type old_val = 42; + Type new_val = 24; + Type var = old_val; + EXPECT_TRUE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val, + memory_order_relaxed)); + EXPECT_FALSE(atomic_compare_exchange_weak((T*)&var, &old_val, new_val, + memory_order_relaxed)); + EXPECT_EQ(new_val, old_val); + } +} + +TEST(SanitizerCommon, AtomicCompareExchangeTest) { + CheckAtomicCompareExchange<atomic_uint8_t>(); + CheckAtomicCompareExchange<atomic_uint16_t>(); + CheckAtomicCompareExchange<atomic_uint32_t>(); + CheckAtomicCompareExchange<atomic_uint64_t>(); + CheckAtomicCompareExchange<atomic_uintptr_t>(); +} +#endif //!SANITIZER_ANDROID + +} // namespace __sanitizer diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc index 01d8b5a87c01..424c279d4ada 100644 --- a/lib/sanitizer_common/tests/sanitizer_common_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_platform.h" #include "gtest/gtest.h" namespace __sanitizer { @@ -79,7 +80,7 
@@ TEST(SanitizerCommon, MmapAlignedOrDie) { } } -#ifdef __linux__ +#if SANITIZER_LINUX TEST(SanitizerCommon, SanitizerSetThreadName) { const char *names[] = { "0123456789012", @@ -96,4 +97,65 @@ TEST(SanitizerCommon, SanitizerSetThreadName) { } #endif -} // namespace sanitizer +TEST(SanitizerCommon, InternalVector) { + InternalVector<uptr> vector(1); + for (uptr i = 0; i < 100; i++) { + EXPECT_EQ(i, vector.size()); + vector.push_back(i); + } + for (uptr i = 0; i < 100; i++) { + EXPECT_EQ(i, vector[i]); + } + for (int i = 99; i >= 0; i--) { + EXPECT_EQ((uptr)i, vector.back()); + vector.pop_back(); + EXPECT_EQ((uptr)i, vector.size()); + } +} + +void TestThreadInfo(bool main) { + uptr stk_addr = 0; + uptr stk_size = 0; + uptr tls_addr = 0; + uptr tls_size = 0; + GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size); + + int stack_var; + EXPECT_NE(stk_addr, (uptr)0); + EXPECT_NE(stk_size, (uptr)0); + EXPECT_GT((uptr)&stack_var, stk_addr); + EXPECT_LT((uptr)&stack_var, stk_addr + stk_size); + +#if SANITIZER_LINUX && defined(__x86_64__) + static __thread int thread_var; + EXPECT_NE(tls_addr, (uptr)0); + EXPECT_NE(tls_size, (uptr)0); + EXPECT_GT((uptr)&thread_var, tls_addr); + EXPECT_LT((uptr)&thread_var, tls_addr + tls_size); + + // Ensure that tls and stack do not intersect. + uptr tls_end = tls_addr + tls_size; + EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size); + EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size); + EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr)); +#endif +} + +static void *WorkerThread(void *arg) { + TestThreadInfo(false); + return 0; +} + +TEST(SanitizerCommon, ThreadStackTlsMain) { + InitTlsSize(); + TestThreadInfo(true); +} + +TEST(SanitizerCommon, ThreadStackTlsWorker) { + InitTlsSize(); + pthread_t t; + pthread_create(&t, 0, WorkerThread, 0); + pthread_join(t, 0); +} + +} // namespace __sanitizer diff --git a/lib/sanitizer_common/tests/sanitizer_flags_test.cc b/lib/sanitizer_common/tests/sanitizer_flags_test.cc index c0589f4d2e90..cd3cac11bc80 100644 --- a/lib/sanitizer_common/tests/sanitizer_flags_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_flags_test.cc @@ -32,7 +32,7 @@ static void TestStrFlag(const char *start_value, const char *env, const char *final_value) { const char *flag = start_value; ParseFlag(env, &flag, kFlagName); - EXPECT_EQ(internal_strcmp(final_value, flag), 0); + EXPECT_EQ(0, internal_strcmp(final_value, flag)); } TEST(SanitizerCommon, BooleanFlags) { @@ -63,6 +63,24 @@ TEST(SanitizerCommon, StrFlags) { TestStrFlag("", "--flag_name='abc zxc'", "abc zxc"); TestStrFlag("", "--flag_name='abc zxcc'", "abc zxcc"); TestStrFlag("", "--flag_name=\"abc qwe\" asd", "abc qwe"); + TestStrFlag("", "other_flag_name=zzz", ""); +} + +static void TestTwoFlags(const char *env, bool expected_flag1, + const char *expected_flag2) { + bool flag1 = !expected_flag1; + const char *flag2 = ""; + ParseFlag(env, &flag1, "flag1"); + ParseFlag(env, &flag2, "flag2"); + EXPECT_EQ(expected_flag1, flag1); + EXPECT_EQ(0, internal_strcmp(flag2, expected_flag2)); +} + +TEST(SanitizerCommon, MultipleFlags) { + TestTwoFlags("flag1=1 flag2='zzz'", true, "zzz"); + TestTwoFlags("flag2='qxx' flag1=0", false, "qxx"); + TestTwoFlags("flag1=false:flag2='zzz'", false, "zzz"); + TestTwoFlags("flag2=qxx:flag1=yes", true, "qxx"); } } // namespace __sanitizer diff --git a/lib/sanitizer_common/tests/sanitizer_libc_test.cc b/lib/sanitizer_common/tests/sanitizer_libc_test.cc index b9d8414e0cbf..39c29d357327 100644 --- 
a/lib/sanitizer_common/tests/sanitizer_libc_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_libc_test.cc @@ -9,9 +9,18 @@ // Tests for sanitizer_libc.h. //===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_platform.h" #include "gtest/gtest.h" +#if SANITIZER_LINUX || SANITIZER_MAC +# define SANITIZER_TEST_HAS_STAT_H 1 +# include <sys/stat.h> +#else +# define SANITIZER_TEST_HAS_STAT_H 0 +#endif + // A regression test for internal_memmove() implementation. TEST(SanitizerCommon, InternalMemmoveRegression) { char src[] = "Hello World"; @@ -40,3 +49,69 @@ TEST(SanitizerCommon, mem_is_zero) { } delete [] x; } + +struct stat_and_more { + struct stat st; + unsigned char z; +}; + +TEST(SanitizerCommon, FileOps) { + const char *str1 = "qwerty"; + uptr len1 = internal_strlen(str1); + const char *str2 = "zxcv"; + uptr len2 = internal_strlen(str2); + + u32 uid = GetUid(); + char temp_filename[128]; +#if SANITIZER_ANDROID + // I don't know a way to query temp directory location on Android without + // going through Java interfaces. The code below is not ideal, but should + // work. May require "adb root", but it is needed for almost any use of ASan + // on Android already. + internal_snprintf(temp_filename, sizeof(temp_filename), + "%s/sanitizer_common.tmp.%d", + GetEnv("EXTERNAL_STORAGE"), uid); +#else + internal_snprintf(temp_filename, sizeof(temp_filename), + "/tmp/sanitizer_common.tmp.%d", uid); +#endif + uptr openrv = OpenFile(temp_filename, true); + EXPECT_FALSE(internal_iserror(openrv)); + fd_t fd = openrv; + EXPECT_EQ(len1, internal_write(fd, str1, len1)); + EXPECT_EQ(len2, internal_write(fd, str2, len2)); + internal_close(fd); + + openrv = OpenFile(temp_filename, false); + EXPECT_FALSE(internal_iserror(openrv)); + fd = openrv; + uptr fsize = internal_filesize(fd); + EXPECT_EQ(len1 + len2, fsize); + +#if SANITIZER_TEST_HAS_STAT_H + struct stat st1, st2, st3; + EXPECT_EQ(0u, internal_stat(temp_filename, &st1)); + EXPECT_EQ(0u, internal_lstat(temp_filename, &st2)); + EXPECT_EQ(0u, internal_fstat(fd, &st3)); + EXPECT_EQ(fsize, (uptr)st3.st_size); + + // Verify that internal_fstat does not write beyond the end of the supplied + // buffer. + struct stat_and_more sam; + memset(&sam, 0xAB, sizeof(sam)); + EXPECT_EQ(0u, internal_fstat(fd, &sam.st)); + EXPECT_EQ(0xAB, sam.z); + EXPECT_NE(0xAB, sam.st.st_size); + EXPECT_NE(0, sam.st.st_size); +#endif + + char buf[64] = {}; + EXPECT_EQ(len1, internal_read(fd, buf, len1)); + EXPECT_EQ(0, internal_memcmp(buf, str1, len1)); + EXPECT_EQ((char)0, buf[len1 + 1]); + internal_memset(buf, 0, len1); + EXPECT_EQ(len2, internal_read(fd, buf, len2)); + EXPECT_EQ(0, internal_memcmp(buf, str2, len2)); + internal_close(fd); +} + diff --git a/lib/sanitizer_common/tests/sanitizer_linux_test.cc b/lib/sanitizer_common/tests/sanitizer_linux_test.cc new file mode 100644 index 000000000000..b18aeb030acf --- /dev/null +++ b/lib/sanitizer_common/tests/sanitizer_linux_test.cc @@ -0,0 +1,253 @@ +//===-- sanitizer_linux_test.cc -------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// Tests for sanitizer_linux.h +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX + +#include "sanitizer_common/sanitizer_linux.h" + +#include "sanitizer_common/sanitizer_common.h" +#include "gtest/gtest.h" + +#ifdef __x86_64__ +#include <asm/prctl.h> +#endif +#include <pthread.h> +#include <sched.h> +#include <stdlib.h> + +#include <algorithm> +#include <vector> + +#ifdef __x86_64__ +extern "C" int arch_prctl(int code, __sanitizer::uptr *addr); +#endif + +namespace __sanitizer { + +struct TidReporterArgument { + TidReporterArgument() { + pthread_mutex_init(&terminate_thread_mutex, NULL); + pthread_mutex_init(&tid_reported_mutex, NULL); + pthread_cond_init(&terminate_thread_cond, NULL); + pthread_cond_init(&tid_reported_cond, NULL); + terminate_thread = false; + } + + ~TidReporterArgument() { + pthread_mutex_destroy(&terminate_thread_mutex); + pthread_mutex_destroy(&tid_reported_mutex); + pthread_cond_destroy(&terminate_thread_cond); + pthread_cond_destroy(&tid_reported_cond); + } + + pid_t reported_tid; + // For signaling to spawned threads that they should terminate. + pthread_cond_t terminate_thread_cond; + pthread_mutex_t terminate_thread_mutex; + bool terminate_thread; + // For signaling to main thread that a child thread has reported its tid. + pthread_cond_t tid_reported_cond; + pthread_mutex_t tid_reported_mutex; + + private: + // Disallow evil constructors + TidReporterArgument(const TidReporterArgument &); + void operator=(const TidReporterArgument &); +}; + +class ThreadListerTest : public ::testing::Test { + protected: + virtual void SetUp() { + pthread_t pthread_id; + pid_t tid; + for (uptr i = 0; i < kThreadCount; i++) { + SpawnTidReporter(&pthread_id, &tid); + pthread_ids_.push_back(pthread_id); + tids_.push_back(tid); + } + } + + virtual void TearDown() { + pthread_mutex_lock(&thread_arg.terminate_thread_mutex); + thread_arg.terminate_thread = true; + pthread_cond_broadcast(&thread_arg.terminate_thread_cond); + pthread_mutex_unlock(&thread_arg.terminate_thread_mutex); + for (uptr i = 0; i < pthread_ids_.size(); i++) + pthread_join(pthread_ids_[i], NULL); + } + + void SpawnTidReporter(pthread_t *pthread_id, pid_t *tid); + + static const uptr kThreadCount = 20; + + std::vector<pthread_t> pthread_ids_; + std::vector<pid_t> tids_; + + TidReporterArgument thread_arg; +}; + +// Writes its TID once to reported_tid and waits until signaled to terminate. 
+void *TidReporterThread(void *argument) { + TidReporterArgument *arg = reinterpret_cast<TidReporterArgument *>(argument); + pthread_mutex_lock(&arg->tid_reported_mutex); + arg->reported_tid = GetTid(); + pthread_cond_broadcast(&arg->tid_reported_cond); + pthread_mutex_unlock(&arg->tid_reported_mutex); + + pthread_mutex_lock(&arg->terminate_thread_mutex); + while (!arg->terminate_thread) + pthread_cond_wait(&arg->terminate_thread_cond, + &arg->terminate_thread_mutex); + pthread_mutex_unlock(&arg->terminate_thread_mutex); + return NULL; +} + +void ThreadListerTest::SpawnTidReporter(pthread_t *pthread_id, + pid_t *tid) { + pthread_mutex_lock(&thread_arg.tid_reported_mutex); + thread_arg.reported_tid = -1; + ASSERT_EQ(0, pthread_create(pthread_id, NULL, + TidReporterThread, + &thread_arg)); + while (thread_arg.reported_tid == -1) + pthread_cond_wait(&thread_arg.tid_reported_cond, + &thread_arg.tid_reported_mutex); + pthread_mutex_unlock(&thread_arg.tid_reported_mutex); + *tid = thread_arg.reported_tid; +} + +static std::vector<pid_t> ReadTidsToVector(ThreadLister *thread_lister) { + std::vector<pid_t> listed_tids; + pid_t tid; + while ((tid = thread_lister->GetNextTID()) >= 0) + listed_tids.push_back(tid); + EXPECT_FALSE(thread_lister->error()); + return listed_tids; +} + +static bool Includes(std::vector<pid_t> first, std::vector<pid_t> second) { + std::sort(first.begin(), first.end()); + std::sort(second.begin(), second.end()); + return std::includes(first.begin(), first.end(), + second.begin(), second.end()); +} + +static bool HasElement(std::vector<pid_t> vector, pid_t element) { + return std::find(vector.begin(), vector.end(), element) != vector.end(); +} + +// ThreadLister's output should include the current thread's TID and the TID of +// every thread we spawned. +TEST_F(ThreadListerTest, ThreadListerSeesAllSpawnedThreads) { + pid_t self_tid = GetTid(); + ThreadLister thread_lister(getpid()); + std::vector<pid_t> listed_tids = ReadTidsToVector(&thread_lister); + ASSERT_TRUE(HasElement(listed_tids, self_tid)); + ASSERT_TRUE(Includes(listed_tids, tids_)); +} + +// Calling Reset() should not cause ThreadLister to forget any threads it's +// supposed to know about. +TEST_F(ThreadListerTest, ResetDoesNotForgetThreads) { + ThreadLister thread_lister(getpid()); + + // Run the loop body twice, because Reset() might behave differently if called + // on a freshly created object. + for (uptr i = 0; i < 2; i++) { + thread_lister.Reset(); + std::vector<pid_t> listed_tids = ReadTidsToVector(&thread_lister); + ASSERT_TRUE(Includes(listed_tids, tids_)); + } +} + +// If new threads have spawned during ThreadLister object's lifetime, calling +// Reset() should cause ThreadLister to recognize their existence. +TEST_F(ThreadListerTest, ResetMakesNewThreadsKnown) { + ThreadLister thread_lister(getpid()); + std::vector<pid_t> threads_before_extra = ReadTidsToVector(&thread_lister); + + pthread_t extra_pthread_id; + pid_t extra_tid; + SpawnTidReporter(&extra_pthread_id, &extra_tid); + // Register the new thread so it gets terminated in TearDown(). + pthread_ids_.push_back(extra_pthread_id); + + // It would be very bizarre if the new TID had been listed before we even + // spawned that thread, but it would also cause a false success in this test, + // so better check for that. 
+ ASSERT_FALSE(HasElement(threads_before_extra, extra_tid)); + + thread_lister.Reset(); + + std::vector<pid_t> threads_after_extra = ReadTidsToVector(&thread_lister); + ASSERT_TRUE(HasElement(threads_after_extra, extra_tid)); +} + +TEST(SanitizerCommon, SetEnvTest) { + const char kEnvName[] = "ENV_FOO"; + SetEnv(kEnvName, "value"); + EXPECT_STREQ("value", getenv(kEnvName)); + unsetenv(kEnvName); + EXPECT_EQ(0, getenv(kEnvName)); +} + +#ifdef __x86_64__ +// libpthread puts the thread descriptor (%fs:0x0) at the end of stack space. +void *thread_descriptor_test_func(void *arg) { + uptr fs; + arch_prctl(ARCH_GET_FS, &fs); + pthread_attr_t attr; + pthread_getattr_np(pthread_self(), &attr); + void *stackaddr; + uptr stacksize; + pthread_attr_getstack(&attr, &stackaddr, &stacksize); + return (void *)((uptr)stackaddr + stacksize - fs); +} + +TEST(SanitizerLinux, ThreadDescriptorSize) { + pthread_t tid; + void *result; + pthread_create(&tid, 0, thread_descriptor_test_func, 0); + ASSERT_EQ(0, pthread_join(tid, &result)); + EXPECT_EQ((uptr)result, ThreadDescriptorSize()); +} +#endif + +TEST(SanitizerCommon, LibraryNameIs) { + EXPECT_FALSE(LibraryNameIs("", "")); + + char full_name[256]; + const char *paths[] = { "", "/", "/path/to/" }; + const char *suffixes[] = { "", "-linux", ".1.2", "-linux.1.2" }; + const char *base_names[] = { "lib", "lib.0", "lib-i386" }; + const char *wrong_names[] = { "", "lib.9", "lib-x86_64" }; + for (uptr i = 0; i < ARRAY_SIZE(paths); i++) + for (uptr j = 0; j < ARRAY_SIZE(suffixes); j++) { + for (uptr k = 0; k < ARRAY_SIZE(base_names); k++) { + internal_snprintf(full_name, ARRAY_SIZE(full_name), "%s%s%s.so", + paths[i], base_names[k], suffixes[j]); + EXPECT_TRUE(LibraryNameIs(full_name, base_names[k])) + << "Full name " << full_name + << " doesn't match base name " << base_names[k]; + for (uptr m = 0; m < ARRAY_SIZE(wrong_names); m++) + EXPECT_FALSE(LibraryNameIs(full_name, wrong_names[m])) + << "Full name " << full_name + << " matches base name " << wrong_names[m]; + } + } +} + +} // namespace __sanitizer + +#endif // SANITIZER_LINUX diff --git a/lib/sanitizer_common/tests/sanitizer_mutex_test.cc b/lib/sanitizer_common/tests/sanitizer_mutex_test.cc index 6bb2ae29a188..1dc9bef20710 100644 --- a/lib/sanitizer_common/tests/sanitizer_mutex_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_mutex_test.cc @@ -92,6 +92,12 @@ static void *try_thread(void *param) { return 0; } +template<typename MutexType> +static void check_locked(MutexType *mtx) { + GenericScopedLock<MutexType> l(mtx); + mtx->CheckLocked(); +} + TEST(SanitizerCommon, SpinMutex) { SpinMutex mtx; mtx.Init(); @@ -123,6 +129,7 @@ TEST(SanitizerCommon, BlockingMutex) { pthread_create(&threads[i], 0, lock_thread<BlockingMutex>, &data); for (int i = 0; i < kThreads; i++) pthread_join(threads[i], 0); + check_locked(mtx); } } // namespace __sanitizer diff --git a/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc b/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc index 00b260479da9..1df2bcfd4bec 100644 --- a/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_scanf_interceptor_test.cc @@ -19,45 +19,72 @@ using namespace __sanitizer; -#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ +#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ ((std::vector<unsigned> *)ctx)->push_back(size) #include "sanitizer_common/sanitizer_common_interceptors_scanf.inc" -static void testScanf2(void *ctx, const char *format, ...) 
{ +static const char scanf_buf[] = "Test string."; +static size_t scanf_buf_size = sizeof(scanf_buf); +static const unsigned SCANF_ARGS_MAX = 16; + +static void testScanf3(void *ctx, int result, bool allowGnuMalloc, + const char *format, ...) { va_list ap; va_start(ap, format); - scanf_common(ctx, format, ap); + scanf_common(ctx, result, allowGnuMalloc, format, ap); va_end(ap); } -static void testScanf(const char *format, unsigned n, ...) { +static void testScanf2(const char *format, int scanf_result, + bool allowGnuMalloc, unsigned n, + va_list expected_sizes) { std::vector<unsigned> scanf_sizes; // 16 args should be enough. - testScanf2((void *)&scanf_sizes, format, - (void*)0, (void*)0, (void*)0, (void*)0, - (void*)0, (void*)0, (void*)0, (void*)0, - (void*)0, (void*)0, (void*)0, (void*)0, - (void*)0, (void*)0, (void*)0, (void*)0); - ASSERT_EQ(n, scanf_sizes.size()) << - "Unexpected number of format arguments: '" << format << "'"; + testScanf3((void *)&scanf_sizes, scanf_result, allowGnuMalloc, format, + scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf, + scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf, scanf_buf, + scanf_buf, scanf_buf, scanf_buf, scanf_buf); + ASSERT_EQ(n, scanf_sizes.size()) << "Unexpected number of format arguments: '" + << format << "'"; + for (unsigned i = 0; i < n; ++i) + EXPECT_EQ(va_arg(expected_sizes, unsigned), scanf_sizes[i]) + << "Unexpect write size for argument " << i << ", format string '" + << format << "'"; +} + +static void testScanf(const char *format, unsigned n, ...) { va_list ap; va_start(ap, n); - for (unsigned i = 0; i < n; ++i) - EXPECT_EQ(va_arg(ap, unsigned), scanf_sizes[i]) << - "Unexpect write size for argument " << i << ", format string '" << - format << "'"; + testScanf2(format, SCANF_ARGS_MAX, /* allowGnuMalloc */ true, n, ap); + va_end(ap); +} + +static void testScanfPartial(const char *format, int scanf_result, unsigned n, + ...) { + va_list ap; + va_start(ap, n); + testScanf2(format, scanf_result, /* allowGnuMalloc */ true, n, ap); + va_end(ap); +} + +static void testScanfNoGnuMalloc(const char *format, unsigned n, ...) 
{ + va_list ap; + va_start(ap, n); + testScanf2(format, SCANF_ARGS_MAX, /* allowGnuMalloc */ false, n, ap); va_end(ap); } TEST(SanitizerCommonInterceptors, Scanf) { - const unsigned I = sizeof(int); // NOLINT - const unsigned L = sizeof(long); // NOLINT - const unsigned LL = sizeof(long long); // NOLINT - const unsigned S = sizeof(short); // NOLINT - const unsigned C = sizeof(char); // NOLINT - const unsigned D = sizeof(double); // NOLINT - const unsigned F = sizeof(float); // NOLINT + const unsigned I = sizeof(int); // NOLINT + const unsigned L = sizeof(long); // NOLINT + const unsigned LL = sizeof(long long); // NOLINT + const unsigned S = sizeof(short); // NOLINT + const unsigned C = sizeof(char); // NOLINT + const unsigned D = sizeof(double); // NOLINT + const unsigned LD = sizeof(long double); // NOLINT + const unsigned F = sizeof(float); // NOLINT + const unsigned P = sizeof(char *); // NOLINT testScanf("%d", 1, I); testScanf("%d%d%d", 3, I, I, I); @@ -65,6 +92,7 @@ TEST(SanitizerCommonInterceptors, Scanf) { testScanf("%ld", 1, L); testScanf("%llu", 1, LL); testScanf("a %hd%hhx", 2, S, C); + testScanf("%c", 1, C); testScanf("%%", 0); testScanf("a%%", 0); @@ -79,7 +107,72 @@ TEST(SanitizerCommonInterceptors, Scanf) { testScanf("%nf", 1, I); testScanf("%10s", 1, 11); + testScanf("%10c", 1, 10); testScanf("%%10s", 0); testScanf("%*10s", 0); testScanf("%*d", 0); + + testScanf("%4d%8f%c", 3, I, F, C); + testScanf("%s%d", 2, scanf_buf_size, I); + testScanf("%[abc]", 1, scanf_buf_size); + testScanf("%4[bcdef]", 1, 5); + testScanf("%[]]", 1, scanf_buf_size); + testScanf("%8[^]%d0-9-]%c", 2, 9, C); + + testScanf("%*[^:]%n:%d:%1[ ]%n", 4, I, I, 2, I); + + testScanf("%*d%u", 1, I); + + testScanf("%c%d", 2, C, I); + testScanf("%A%lf", 2, F, D); + + testScanf("%ms %Lf", 2, P, LD); + testScanf("s%Las", 1, LD); + testScanf("%ar", 1, F); + + // In the cases with std::min below the format spec can be interpreted as + // either floating-something, or (GNU extension) callee-allocated string. + // Our conservative implementation reports one of the two possibilities with + // the least store range. 
+ testScanf("%a[", 0); + testScanf("%a[]", 0); + testScanf("%a[]]", 1, std::min(F, P)); + testScanf("%a[abc]", 1, std::min(F, P)); + testScanf("%a[^abc]", 1, std::min(F, P)); + testScanf("%a[ab%c] %d", 0); + testScanf("%a[^ab%c] %d", 0); + testScanf("%as", 1, std::min(F, P)); + testScanf("%aS", 1, std::min(F, P)); + testScanf("%a13S", 1, std::min(F, P)); + testScanf("%alS", 1, std::min(F, P)); + + testScanfNoGnuMalloc("s%Las", 1, LD); + testScanfNoGnuMalloc("%ar", 1, F); + testScanfNoGnuMalloc("%a[", 1, F); + testScanfNoGnuMalloc("%a[]", 1, F); + testScanfNoGnuMalloc("%a[]]", 1, F); + testScanfNoGnuMalloc("%a[abc]", 1, F); + testScanfNoGnuMalloc("%a[^abc]", 1, F); + testScanfNoGnuMalloc("%a[ab%c] %d", 3, F, C, I); + testScanfNoGnuMalloc("%a[^ab%c] %d", 3, F, C, I); + testScanfNoGnuMalloc("%as", 1, F); + testScanfNoGnuMalloc("%aS", 1, F); + testScanfNoGnuMalloc("%a13S", 1, F); + testScanfNoGnuMalloc("%alS", 1, F); + + testScanf("%5$d", 0); + testScanf("%md", 0); + testScanf("%m10s", 0); + + testScanfPartial("%d%d%d%d //1\n", 1, 1, I); + testScanfPartial("%d%d%d%d //2\n", 2, 2, I, I); + testScanfPartial("%d%d%d%d //3\n", 3, 3, I, I, I); + testScanfPartial("%d%d%d%d //4\n", 4, 4, I, I, I, I); + + testScanfPartial("%d%n%n%d //1\n", 1, 1, I); + testScanfPartial("%d%n%n%d //2\n", 2, 4, I, I, I, I); + + testScanfPartial("%d%n%n%d %s %s", 3, 5, I, I, I, I, scanf_buf_size); + testScanfPartial("%d%n%n%d %s %s", 4, 6, I, I, I, I, scanf_buf_size, + scanf_buf_size); } diff --git a/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc b/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc new file mode 100644 index 000000000000..3d352cb97a5e --- /dev/null +++ b/lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc @@ -0,0 +1,96 @@ +//===-- sanitizer_stacktrace_test.cc --------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer/AddressSanitizer runtime. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "gtest/gtest.h" + +namespace __sanitizer { + +class FastUnwindTest : public ::testing::Test { + protected: + virtual void SetUp(); + + uptr fake_stack[10]; + uptr start_pc; + uptr fake_top; + uptr fake_bottom; + StackTrace trace; +}; + +static uptr PC(uptr idx) { + return (1<<20) + idx; +} + +void FastUnwindTest::SetUp() { + // Fill an array of pointers with fake fp+retaddr pairs. Frame pointers have + // even indices. + for (uptr i = 0; i+1 < ARRAY_SIZE(fake_stack); i += 2) { + fake_stack[i] = (uptr)&fake_stack[i+2]; // fp + fake_stack[i+1] = PC(i + 1); // retaddr + } + // Mark the last fp as zero to terminate the stack trace. + fake_stack[RoundDownTo(ARRAY_SIZE(fake_stack) - 1, 2)] = 0; + + // Top is two slots past the end because FastUnwindStack subtracts two. + fake_top = (uptr)&fake_stack[ARRAY_SIZE(fake_stack) + 2]; + // Bottom is one slot before the start because FastUnwindStack uses >. + fake_bottom = (uptr)&fake_stack[-1]; + start_pc = PC(0); + + // This is common setup done by __asan::GetStackTrace(). 
+ trace.size = 0; + trace.max_size = ARRAY_SIZE(fake_stack); + trace.trace[0] = start_pc; +} + +TEST_F(FastUnwindTest, Basic) { + trace.FastUnwindStack(start_pc, (uptr)&fake_stack[0], + fake_top, fake_bottom); + // Should get all on-stack retaddrs and start_pc. + EXPECT_EQ(6U, trace.size); + EXPECT_EQ(start_pc, trace.trace[0]); + for (uptr i = 1; i <= 5; i++) { + EXPECT_EQ(PC(i*2 - 1), trace.trace[i]); + } +} + +// From: http://code.google.com/p/address-sanitizer/issues/detail?id=162 +TEST_F(FastUnwindTest, FramePointerLoop) { + // Make one fp point to itself. + fake_stack[4] = (uptr)&fake_stack[4]; + trace.FastUnwindStack(start_pc, (uptr)&fake_stack[0], + fake_top, fake_bottom); + // Should get all on-stack retaddrs up to the 4th slot and start_pc. + EXPECT_EQ(4U, trace.size); + EXPECT_EQ(start_pc, trace.trace[0]); + for (uptr i = 1; i <= 3; i++) { + EXPECT_EQ(PC(i*2 - 1), trace.trace[i]); + } +} + +TEST_F(FastUnwindTest, MisalignedFramePointer) { + // Make one fp misaligned. + fake_stack[4] += 3; + trace.FastUnwindStack(start_pc, (uptr)&fake_stack[0], + fake_top, fake_bottom); + // Should get all on-stack retaddrs up to the 4th slot and start_pc. + EXPECT_EQ(4U, trace.size); + EXPECT_EQ(start_pc, trace.trace[0]); + for (uptr i = 1; i < 4U; i++) { + EXPECT_EQ(PC(i*2 - 1), trace.trace[i]); + } +} + + +} // namespace __sanitizer diff --git a/lib/sanitizer_common/tests/sanitizer_stoptheworld_test.cc b/lib/sanitizer_common/tests/sanitizer_stoptheworld_test.cc new file mode 100644 index 000000000000..a5f8516df575 --- /dev/null +++ b/lib/sanitizer_common/tests/sanitizer_stoptheworld_test.cc @@ -0,0 +1,194 @@ +//===-- sanitizer_stoptheworld_test.cc ------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Tests for sanitizer_stoptheworld.h +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX + +#include "sanitizer_common/sanitizer_stoptheworld.h" +#include "gtest/gtest.h" + +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_common.h" + +#include <pthread.h> +#include <sched.h> + +namespace __sanitizer { + +static pthread_mutex_t incrementer_thread_exit_mutex; + +struct CallbackArgument { + volatile int counter; + volatile bool threads_stopped; + volatile bool callback_executed; + CallbackArgument() + : counter(0), + threads_stopped(false), + callback_executed(false) {} +}; + +void *IncrementerThread(void *argument) { + CallbackArgument *callback_argument = (CallbackArgument *)argument; + while (true) { + __sync_fetch_and_add(&callback_argument->counter, 1); + if (pthread_mutex_trylock(&incrementer_thread_exit_mutex) == 0) { + pthread_mutex_unlock(&incrementer_thread_exit_mutex); + return NULL; + } else { + sched_yield(); + } + } +} + +// This callback checks that IncrementerThread is suspended at the time of its +// execution. 
+void Callback(const SuspendedThreadsList &suspended_threads_list, + void *argument) { + CallbackArgument *callback_argument = (CallbackArgument *)argument; + callback_argument->callback_executed = true; + int counter_at_init = __sync_fetch_and_add(&callback_argument->counter, 0); + for (uptr i = 0; i < 1000; i++) { + sched_yield(); + if (__sync_fetch_and_add(&callback_argument->counter, 0) != + counter_at_init) { + callback_argument->threads_stopped = false; + return; + } + } + callback_argument->threads_stopped = true; +} + +TEST(StopTheWorld, SuspendThreadsSimple) { + pthread_mutex_init(&incrementer_thread_exit_mutex, NULL); + CallbackArgument argument; + pthread_t thread_id; + int pthread_create_result; + pthread_mutex_lock(&incrementer_thread_exit_mutex); + pthread_create_result = pthread_create(&thread_id, NULL, IncrementerThread, + &argument); + ASSERT_EQ(0, pthread_create_result); + StopTheWorld(&Callback, &argument); + pthread_mutex_unlock(&incrementer_thread_exit_mutex); + EXPECT_TRUE(argument.callback_executed); + EXPECT_TRUE(argument.threads_stopped); + // argument is on stack, so we have to wait for the incrementer thread to + // terminate before we can return from this function. + ASSERT_EQ(0, pthread_join(thread_id, NULL)); + pthread_mutex_destroy(&incrementer_thread_exit_mutex); +} + +// A more comprehensive test where we spawn a bunch of threads while executing +// StopTheWorld in parallel. +static const uptr kThreadCount = 50; +static const uptr kStopWorldAfter = 10; // let this many threads spawn first + +static pthread_mutex_t advanced_incrementer_thread_exit_mutex; + +struct AdvancedCallbackArgument { + volatile uptr thread_index; + volatile int counters[kThreadCount]; + pthread_t thread_ids[kThreadCount]; + volatile bool threads_stopped; + volatile bool callback_executed; + volatile bool fatal_error; + AdvancedCallbackArgument() + : thread_index(0), + threads_stopped(false), + callback_executed(false), + fatal_error(false) {} +}; + +void *AdvancedIncrementerThread(void *argument) { + AdvancedCallbackArgument *callback_argument = + (AdvancedCallbackArgument *)argument; + uptr this_thread_index = __sync_fetch_and_add( + &callback_argument->thread_index, 1); + // Spawn the next thread. + int pthread_create_result; + if (this_thread_index + 1 < kThreadCount) { + pthread_create_result = + pthread_create(&callback_argument->thread_ids[this_thread_index + 1], + NULL, AdvancedIncrementerThread, argument); + // Cannot use ASSERT_EQ in non-void-returning functions. If there's a + // problem, defer failing to the main thread. + if (pthread_create_result != 0) { + callback_argument->fatal_error = true; + __sync_fetch_and_add(&callback_argument->thread_index, + kThreadCount - callback_argument->thread_index); + } + } + // Do the actual work. 
+ while (true) { + __sync_fetch_and_add(&callback_argument->counters[this_thread_index], 1); + if (pthread_mutex_trylock(&advanced_incrementer_thread_exit_mutex) == 0) { + pthread_mutex_unlock(&advanced_incrementer_thread_exit_mutex); + return NULL; + } else { + sched_yield(); + } + } +} + +void AdvancedCallback(const SuspendedThreadsList &suspended_threads_list, + void *argument) { + AdvancedCallbackArgument *callback_argument = + (AdvancedCallbackArgument *)argument; + callback_argument->callback_executed = true; + + int counters_at_init[kThreadCount]; + for (uptr j = 0; j < kThreadCount; j++) + counters_at_init[j] = __sync_fetch_and_add(&callback_argument->counters[j], + 0); + for (uptr i = 0; i < 10; i++) { + sched_yield(); + for (uptr j = 0; j < kThreadCount; j++) + if (__sync_fetch_and_add(&callback_argument->counters[j], 0) != + counters_at_init[j]) { + callback_argument->threads_stopped = false; + return; + } + } + callback_argument->threads_stopped = true; +} + +TEST(StopTheWorld, SuspendThreadsAdvanced) { + pthread_mutex_init(&advanced_incrementer_thread_exit_mutex, NULL); + AdvancedCallbackArgument argument; + + pthread_mutex_lock(&advanced_incrementer_thread_exit_mutex); + int pthread_create_result; + pthread_create_result = pthread_create(&argument.thread_ids[0], NULL, + AdvancedIncrementerThread, + &argument); + ASSERT_EQ(0, pthread_create_result); + // Wait for several threads to spawn before proceeding. + while (__sync_fetch_and_add(&argument.thread_index, 0) < kStopWorldAfter) + sched_yield(); + StopTheWorld(&AdvancedCallback, &argument); + EXPECT_TRUE(argument.callback_executed); + EXPECT_TRUE(argument.threads_stopped); + + // Wait for all threads to spawn before we start terminating them. + while (__sync_fetch_and_add(&argument.thread_index, 0) < kThreadCount) + sched_yield(); + ASSERT_FALSE(argument.fatal_error); // a pthread_create has failed + // Signal the threads to terminate. + pthread_mutex_unlock(&advanced_incrementer_thread_exit_mutex); + for (uptr i = 0; i < kThreadCount; i++) + ASSERT_EQ(0, pthread_join(argument.thread_ids[i], NULL)); + pthread_mutex_destroy(&advanced_incrementer_thread_exit_mutex); +} + +} // namespace __sanitizer + +#endif // SANITIZER_LINUX diff --git a/lib/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc b/lib/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc new file mode 100644 index 000000000000..d8be2afb19e9 --- /dev/null +++ b/lib/sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc @@ -0,0 +1,53 @@ +//===-- sanitizer_stoptheworld_testlib.cc ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// Dynamic library to test StopTheWorld functionality. +// When loaded with LD_PRELOAD, it will periodically suspend all threads. +//===----------------------------------------------------------------------===// +/* Usage: +clang++ -fno-exceptions -g -fPIC -I. 
\ + sanitizer_common/tests/sanitizer_stoptheworld_testlib.cc \ + sanitizer_common/sanitizer_*.cc -shared -lpthread -o teststoptheworld.so +LD_PRELOAD=`pwd`/teststoptheworld.so /your/app +*/ + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX + +#include <dlfcn.h> +#include <stddef.h> +#include <stdio.h> +#include <pthread.h> +#include <unistd.h> + +#include "sanitizer_common/sanitizer_stoptheworld.h" + +namespace { +const uptr kSuspendDuration = 3; +const uptr kRunDuration = 3; + +void Callback(const SuspendedThreadsList &suspended_threads_list, + void *argument) { + sleep(kSuspendDuration); +} + +void *SuspenderThread(void *argument) { + while (true) { + sleep(kRunDuration); + StopTheWorld(Callback, NULL); + } + return NULL; +} + +__attribute__((constructor)) void StopTheWorldTestLibConstructor(void) { + pthread_t thread_id; + pthread_create(&thread_id, NULL, SuspenderThread, NULL); +} +} // namespace + +#endif // SANITIZER_LINUX diff --git a/lib/sanitizer_common/tests/sanitizer_test_utils.h b/lib/sanitizer_common/tests/sanitizer_test_utils.h index 6129ea8a5370..a770d0fbd39e 100644 --- a/lib/sanitizer_common/tests/sanitizer_test_utils.h +++ b/lib/sanitizer_common/tests/sanitizer_test_utils.h @@ -36,12 +36,14 @@ typedef __int64 int64_t; #define __has_feature(x) 0 #endif -#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) -# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \ - __attribute__((no_address_safety_analysis)) -#else -# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS -#endif +#ifndef ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS +# if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) +# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \ + __attribute__((no_sanitize_address)) +# else +# define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS +# endif +#endif // ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS #if __LP64__ || defined(_WIN64) # define SANITIZER_WORDSIZE 64 diff --git a/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc b/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc new file mode 100644 index 000000000000..e080403fb56c --- /dev/null +++ b/lib/sanitizer_common/tests/sanitizer_thread_registry_test.cc @@ -0,0 +1,230 @@ +//===-- sanitizer_thread_registry_test.cc ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of shared sanitizer runtime. 
+// +//===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_thread_registry.h" +#include "gtest/gtest.h" + +#include <vector> + +namespace __sanitizer { + +static BlockingMutex tctx_allocator_lock(LINKER_INITIALIZED); +static LowLevelAllocator tctx_allocator; + +template<typename TCTX> +static ThreadContextBase *GetThreadContext(u32 tid) { + BlockingMutexLock l(&tctx_allocator_lock); + void *mem = tctx_allocator.Allocate(sizeof(TCTX)); + return new(mem) TCTX(tid); +} + +static const u32 kMaxRegistryThreads = 1000; +static const u32 kRegistryQuarantine = 2; + +static void CheckThreadQuantity(ThreadRegistry *registry, uptr exp_total, + uptr exp_running, uptr exp_alive) { + uptr total, running, alive; + registry->GetNumberOfThreads(&total, &running, &alive); + EXPECT_EQ(exp_total, total); + EXPECT_EQ(exp_running, running); + EXPECT_EQ(exp_alive, alive); +} + +static bool is_detached(u32 tid) { + return (tid % 2 == 0); +} + +static uptr get_uid(u32 tid) { + return tid * 2; +} + +static bool HasName(ThreadContextBase *tctx, void *arg) { + char *name = (char*)arg; + return (tctx->name && 0 == internal_strcmp(tctx->name, name)); +} + +static bool HasUid(ThreadContextBase *tctx, void *arg) { + uptr uid = (uptr)arg; + return (tctx->user_id == uid); +} + +static void MarkUidAsPresent(ThreadContextBase *tctx, void *arg) { + bool *arr = (bool*)arg; + arr[tctx->tid] = true; +} + +static void TestRegistry(ThreadRegistry *registry, bool has_quarantine) { + // Create and start a main thread. + EXPECT_EQ(0U, registry->CreateThread(get_uid(0), true, -1, 0)); + registry->StartThread(0, 0, 0); + // Create a bunch of threads. + for (u32 i = 1; i <= 10; i++) { + EXPECT_EQ(i, registry->CreateThread(get_uid(i), is_detached(i), 0, 0)); + } + CheckThreadQuantity(registry, 11, 1, 11); + // Start some of them. + for (u32 i = 1; i <= 5; i++) { + registry->StartThread(i, 0, 0); + } + CheckThreadQuantity(registry, 11, 6, 11); + // Finish, create and start more threads. + for (u32 i = 1; i <= 5; i++) { + registry->FinishThread(i); + if (!is_detached(i)) + registry->JoinThread(i, 0); + } + for (u32 i = 6; i <= 10; i++) { + registry->StartThread(i, 0, 0); + } + std::vector<u32> new_tids; + for (u32 i = 11; i <= 15; i++) { + new_tids.push_back( + registry->CreateThread(get_uid(i), is_detached(i), 0, 0)); + } + ASSERT_LE(kRegistryQuarantine, 5U); + u32 exp_total = 16 - (has_quarantine ? 5 - kRegistryQuarantine : 0); + CheckThreadQuantity(registry, exp_total, 6, 11); + // Test SetThreadName and FindThread. + registry->SetThreadName(6, "six"); + registry->SetThreadName(7, "seven"); + EXPECT_EQ(7U, registry->FindThread(HasName, (void*)"seven")); + EXPECT_EQ(ThreadRegistry::kUnknownTid, + registry->FindThread(HasName, (void*)"none")); + EXPECT_EQ(0U, registry->FindThread(HasUid, (void*)get_uid(0))); + EXPECT_EQ(10U, registry->FindThread(HasUid, (void*)get_uid(10))); + EXPECT_EQ(ThreadRegistry::kUnknownTid, + registry->FindThread(HasUid, (void*)0x1234)); + // Detach and finish and join remaining threads. + for (u32 i = 6; i <= 10; i++) { + registry->DetachThread(i); + registry->FinishThread(i); + } + for (u32 i = 0; i < new_tids.size(); i++) { + u32 tid = new_tids[i]; + registry->StartThread(tid, 0, 0); + registry->DetachThread(tid); + registry->FinishThread(tid); + } + CheckThreadQuantity(registry, exp_total, 1, 1); + // Test methods that require the caller to hold a ThreadRegistryLock. 
+ bool has_tid[16]; + internal_memset(&has_tid[0], 0, sizeof(has_tid)); + { + ThreadRegistryLock l(registry); + registry->RunCallbackForEachThreadLocked(MarkUidAsPresent, &has_tid[0]); + } + for (u32 i = 0; i < exp_total; i++) { + EXPECT_TRUE(has_tid[i]); + } + { + ThreadRegistryLock l(registry); + registry->CheckLocked(); + ThreadContextBase *main_thread = registry->GetThreadLocked(0); + EXPECT_EQ(main_thread, registry->FindThreadContextLocked( + HasUid, (void*)get_uid(0))); + } + EXPECT_EQ(11U, registry->GetMaxAliveThreads()); +} + +TEST(SanitizerCommon, ThreadRegistryTest) { + ThreadRegistry quarantine_registry(GetThreadContext<ThreadContextBase>, + kMaxRegistryThreads, + kRegistryQuarantine); + TestRegistry(&quarantine_registry, true); + + ThreadRegistry no_quarantine_registry(GetThreadContext<ThreadContextBase>, + kMaxRegistryThreads, + kMaxRegistryThreads); + TestRegistry(&no_quarantine_registry, false); +} + +static const int kThreadsPerShard = 20; +static const int kNumShards = 25; + +static int num_created[kNumShards + 1]; +static int num_started[kNumShards + 1]; +static int num_joined[kNumShards + 1]; + +namespace { + +struct RunThreadArgs { + ThreadRegistry *registry; + uptr shard; // started from 1. +}; + +class TestThreadContext : public ThreadContextBase { + public: + explicit TestThreadContext(int tid) : ThreadContextBase(tid) {} + void OnJoined(void *arg) { + uptr shard = (uptr)arg; + num_joined[shard]++; + } + void OnStarted(void *arg) { + uptr shard = (uptr)arg; + num_started[shard]++; + } + void OnCreated(void *arg) { + uptr shard = (uptr)arg; + num_created[shard]++; + } +}; + +} // namespace + +void *RunThread(void *arg) { + RunThreadArgs *args = static_cast<RunThreadArgs*>(arg); + std::vector<int> tids; + for (int i = 0; i < kThreadsPerShard; i++) + tids.push_back( + args->registry->CreateThread(0, false, 0, (void*)args->shard)); + for (int i = 0; i < kThreadsPerShard; i++) + args->registry->StartThread(tids[i], 0, (void*)args->shard); + for (int i = 0; i < kThreadsPerShard; i++) + args->registry->FinishThread(tids[i]); + for (int i = 0; i < kThreadsPerShard; i++) + args->registry->JoinThread(tids[i], (void*)args->shard); + return 0; +} + +static void ThreadedTestRegistry(ThreadRegistry *registry) { + // Create and start a main thread. + EXPECT_EQ(0U, registry->CreateThread(0, true, -1, 0)); + registry->StartThread(0, 0, 0); + pthread_t threads[kNumShards]; + RunThreadArgs args[kNumShards]; + for (int i = 0; i < kNumShards; i++) { + args[i].registry = registry; + args[i].shard = i + 1; + pthread_create(&threads[i], 0, RunThread, &args[i]); + } + for (int i = 0; i < kNumShards; i++) { + pthread_join(threads[i], 0); + } + // Check that each thread created/started/joined correct amount + // of "threads" in thread_registry. + EXPECT_EQ(1, num_created[0]); + EXPECT_EQ(1, num_started[0]); + EXPECT_EQ(0, num_joined[0]); + for (int i = 1; i <= kNumShards; i++) { + EXPECT_EQ(kThreadsPerShard, num_created[i]); + EXPECT_EQ(kThreadsPerShard, num_started[i]); + EXPECT_EQ(kThreadsPerShard, num_joined[i]); + } +} + +TEST(SanitizerCommon, ThreadRegistryThreadedTest) { + ThreadRegistry registry(GetThreadContext<TestThreadContext>, + kThreadsPerShard * kNumShards + 1, 10); + ThreadedTestRegistry(®istry); +} + +} // namespace __sanitizer diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt index 34e3a2ea524e..282889567509 100644 --- a/lib/tsan/CMakeLists.txt +++ b/lib/tsan/CMakeLists.txt @@ -2,7 +2,12 @@ include_directories(..) 
-set(TSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS}) +# SANITIZER_COMMON_CFLAGS contains -fPIC, but it's performance-critical for +# TSan runtime to be built with -fPIE to reduce the number of register spills. +set(TSAN_CFLAGS + ${SANITIZER_COMMON_CFLAGS} + -fPIE + -fno-rtti) # FIXME: Add support for compile flags: # -Wframe-larger-than=512, # -Wglobal-constructors, diff --git a/lib/tsan/Makefile.old b/lib/tsan/Makefile.old index 593482fbb5da..b548f5d2f6ee 100644 --- a/lib/tsan/Makefile.old +++ b/lib/tsan/Makefile.old @@ -1,13 +1,16 @@ DEBUG=0 LDFLAGS=-ldl -lpthread -pie -CXXFLAGS = -fPIE -g -Wall -Werror -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG) +CXXFLAGS = -fPIE -fno-rtti -g -Wall -Werror \ + -DGTEST_HAS_RTTI=0 -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG) +CLANG=clang +FILECHECK=FileCheck # Silence warnings that Clang produces for gtest code. # Use -Wno-attributes so that gcc doesn't complain about unknown warning types. CXXFLAGS += -Wno-attributes ifeq ($(DEBUG), 0) CXXFLAGS += -O3 endif -ifeq ($(CXX), clang++) +ifeq ($(CXX), $(CLANG)++) CXXFLAGS+= -Wno-unused-private-field -Wno-static-in-inline -Wgnu endif @@ -54,16 +57,16 @@ test: libtsan tsan_test run: all (ulimit -s 8192; ./tsan_test) - ./lit_tests/test_output.sh + CC=$(CLANG) CXX=$(CLANG)++ FILECHECK=$(FILECHECK) ./lit_tests/test_output.sh presubmit: ../sanitizer_common/scripts/check_lint.sh # Debug build with clang. $(MAKE) -f Makefile.old clean - $(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=clang CXX=clang++ + $(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=$(CLANG) CXX=$(CLANG)++ # Release build with clang. $(MAKE) -f Makefile.old clean - $(MAKE) -f Makefile.old run DEBUG=0 -j 16 CC=clang CXX=clang++ + $(MAKE) -f Makefile.old run DEBUG=0 -j 16 CC=$(CLANG) CXX=$(CLANG)++ # Debug build with gcc $(MAKE) -f Makefile.old clean $(MAKE) -f Makefile.old run DEBUG=1 -j 16 CC=gcc CXX=g++ @@ -93,3 +96,5 @@ clean: rm -f asm_*.s libtsan.nm libtsan.objdump */*.o tsan_test rm -rf $(GTEST_BUILD_DIR) $(MAKE) clean -C rtl -f Makefile.old + rm -f go/*.s + rm -rf build diff --git a/lib/tsan/analyze_libtsan.sh b/lib/tsan/analyze_libtsan.sh index e0805610714b..705e4c5460f2 100755 --- a/lib/tsan/analyze_libtsan.sh +++ b/lib/tsan/analyze_libtsan.sh @@ -4,7 +4,7 @@ set -e set -u get_asm() { - grep tsan_$1.: -A 10000 libtsan.objdump | \ + grep __tsan_$1.: -A 10000 libtsan.objdump | \ awk "/[^:]$/ {print;} />:/ {c++; if (c == 2) {exit}}" } @@ -27,7 +27,7 @@ for f in $list; do file=asm_$f.s get_asm $f > $file tot=$(wc -l < $file) - size=$(grep $f$ libtsan.nm | awk --non-decimal-data '{print ("0x"$2)+0}') + size=$(grep __tsan_$f$ libtsan.nm | awk --non-decimal-data '{print ("0x"$2)+0}') rsp=$(grep '(%rsp)' $file | wc -l) push=$(grep 'push' $file | wc -l) pop=$(grep 'pop' $file | wc -l) diff --git a/lib/tsan/check_cmake.sh b/lib/tsan/check_cmake.sh index 5f11e727f091..52c97c339096 100755 --- a/lib/tsan/check_cmake.sh +++ b/lib/tsan/check_cmake.sh @@ -7,5 +7,6 @@ mkdir -p $ROOT/build cd $ROOT/build CC=clang CXX=clang++ cmake -DLLVM_ENABLE_WERROR=ON -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON $ROOT/../../../.. 
make -j64 -make check-tsan check-sanitizer -j64 - +make check-sanitizer -j64 +make check-tsan -j64 +make check-asan -j64 diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh index a153afd6ee8e..51f1a7975b57 100755 --- a/lib/tsan/go/buildgo.sh +++ b/lib/tsan/go/buildgo.sh @@ -20,6 +20,7 @@ SRCS=" ../../sanitizer_common/sanitizer_flags.cc ../../sanitizer_common/sanitizer_libc.cc ../../sanitizer_common/sanitizer_printf.cc + ../../sanitizer_common/sanitizer_thread_registry.cc " if [ "`uname -a | grep Linux`" != "" ]; then @@ -29,7 +30,9 @@ if [ "`uname -a | grep Linux`" != "" ]; then SRCS+=" ../rtl/tsan_platform_linux.cc ../../sanitizer_common/sanitizer_posix.cc + ../../sanitizer_common/sanitizer_posix_libcdep.cc ../../sanitizer_common/sanitizer_linux.cc + ../../sanitizer_common/sanitizer_linux_libcdep.cc " elif [ "`uname -a | grep Darwin`" != "" ]; then SUFFIX="darwin_amd64" @@ -60,7 +63,7 @@ for F in $SRCS; do cat $F >> gotsan.cc done -FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -m64 -Wall -Werror -fno-exceptions -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 $OSCFLAGS" +FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -m64 -Wall -Werror -fno-exceptions -fno-rtti -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 $OSCFLAGS" if [ "$DEBUG" == "" ]; then FLAGS+=" -DTSAN_DEBUG=0 -O3 -fomit-frame-pointer" else diff --git a/lib/tsan/go/test.c b/lib/tsan/go/test.c index 2414a1e9925f..902dfc915582 100644 --- a/lib/tsan/go/test.c +++ b/lib/tsan/go/test.c @@ -13,20 +13,20 @@ #include <stdio.h> -void __tsan_init(); +void __tsan_init(void **thr); void __tsan_fini(); void __tsan_map_shadow(void *addr, unsigned long size); -void __tsan_go_start(int pgoid, int chgoid, void *pc); -void __tsan_go_end(int goid); -void __tsan_read(int goid, void *addr, void *pc); -void __tsan_write(int goid, void *addr, void *pc); -void __tsan_func_enter(int goid, void *pc); -void __tsan_func_exit(int goid); -void __tsan_malloc(int goid, void *p, unsigned long sz, void *pc); +void __tsan_go_start(void *thr, void **chthr, void *pc); +void __tsan_go_end(void *thr); +void __tsan_read(void *thr, void *addr, void *pc); +void __tsan_write(void *thr, void *addr, void *pc); +void __tsan_func_enter(void *thr, void *pc); +void __tsan_func_exit(void *thr); +void __tsan_malloc(void *thr, void *p, unsigned long sz, void *pc); void __tsan_free(void *p); -void __tsan_acquire(int goid, void *addr); -void __tsan_release(int goid, void *addr); -void __tsan_release_merge(int goid, void *addr); +void __tsan_acquire(void *thr, void *addr); +void __tsan_release(void *thr, void *addr); +void __tsan_release_merge(void *thr, void *addr); int __tsan_symbolize(void *pc, char **img, char **rtn, char **file, int *l) { return 0; @@ -35,19 +35,21 @@ int __tsan_symbolize(void *pc, char **img, char **rtn, char **file, int *l) { char buf[10]; int main(void) { - __tsan_init(); + void *thr0 = 0; + __tsan_init(&thr0); __tsan_map_shadow(buf, sizeof(buf) + 4096); - __tsan_func_enter(0, &main); - __tsan_malloc(0, buf, 10, 0); - __tsan_release(0, buf); - __tsan_release_merge(0, buf); - __tsan_go_start(0, 1, 0); - __tsan_write(1, buf, 0); - __tsan_acquire(1, buf); - __tsan_go_end(1); - __tsan_read(0, buf, 0); + __tsan_func_enter(thr0, &main); + __tsan_malloc(thr0, buf, 10, 0); + __tsan_release(thr0, buf); + __tsan_release_merge(thr0, buf); + void *thr1 = 0; + __tsan_go_start(thr0, &thr1, 0); + __tsan_write(thr1, buf, 0); + __tsan_acquire(thr1, buf); + __tsan_go_end(thr1); + __tsan_read(thr0, buf, 0); __tsan_free(buf); - 
__tsan_func_exit(0); + __tsan_func_exit(thr0); __tsan_fini(); return 0; } diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc index 360608a0cf1b..957d58211281 100644 --- a/lib/tsan/go/tsan_go.cc +++ b/lib/tsan/go/tsan_go.cc @@ -18,10 +18,6 @@ namespace __tsan { -const int kMaxGoroutinesEver = 128*1024; - -static ThreadState *goroutines[kMaxGoroutinesEver]; - void InitializeInterceptors() { } @@ -80,20 +76,18 @@ ReportStack *SymbolizeCode(uptr addr) { extern "C" { -static void AllocGoroutine(int tid) { - if (tid >= kMaxGoroutinesEver) { - Printf("FATAL: Reached goroutine limit\n"); - Die(); - } +static ThreadState *main_thr; + +static ThreadState *AllocGoroutine() { ThreadState *thr = (ThreadState*)internal_alloc(MBlockThreadContex, sizeof(ThreadState)); internal_memset(thr, 0, sizeof(*thr)); - goroutines[tid] = thr; + return thr; } -void __tsan_init() { - AllocGoroutine(0); - ThreadState *thr = goroutines[0]; +void __tsan_init(ThreadState **thrp) { + ThreadState *thr = AllocGoroutine(); + main_thr = *thrp = thr; thr->in_rtl++; Initialize(thr); thr->in_rtl--; @@ -101,7 +95,7 @@ void __tsan_init() { void __tsan_fini() { // FIXME: Not necessary thread 0. - ThreadState *thr = goroutines[0]; + ThreadState *thr = main_thr; thr->in_rtl++; int res = Finalize(thr); thr->in_rtl--; @@ -112,44 +106,37 @@ void __tsan_map_shadow(uptr addr, uptr size) { MapShadow(addr, size); } -void __tsan_read(int goid, void *addr, void *pc) { - ThreadState *thr = goroutines[goid]; - MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, false); +void __tsan_read(ThreadState *thr, void *addr, void *pc) { + MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1); } -void __tsan_write(int goid, void *addr, void *pc) { - ThreadState *thr = goroutines[goid]; - MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, true); +void __tsan_write(ThreadState *thr, void *addr, void *pc) { + MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1); } -void __tsan_read_range(int goid, void *addr, uptr size, uptr step, void *pc) { - ThreadState *thr = goroutines[goid]; - for (uptr i = 0; i < size; i += step) - MemoryAccess(thr, (uptr)pc, (uptr)addr + i, 0, false); +void __tsan_read_range(ThreadState *thr, void *addr, uptr size, uptr step, + void *pc) { + MemoryAccessRangeStep(thr, (uptr)pc, (uptr)addr, size, step, false); } -void __tsan_write_range(int goid, void *addr, uptr size, uptr step, void *pc) { - ThreadState *thr = goroutines[goid]; - for (uptr i = 0; i < size; i += step) - MemoryAccess(thr, (uptr)pc, (uptr)addr + i, 0, true); +void __tsan_write_range(ThreadState *thr, void *addr, uptr size, uptr step, + void *pc) { + MemoryAccessRangeStep(thr, (uptr)pc, (uptr)addr, size, step, true); } -void __tsan_func_enter(int goid, void *pc) { - ThreadState *thr = goroutines[goid]; +void __tsan_func_enter(ThreadState *thr, void *pc) { FuncEntry(thr, (uptr)pc); } -void __tsan_func_exit(int goid) { - ThreadState *thr = goroutines[goid]; +void __tsan_func_exit(ThreadState *thr) { FuncExit(thr); } -void __tsan_malloc(int goid, void *p, uptr sz, void *pc) { - ThreadState *thr = goroutines[goid]; +void __tsan_malloc(ThreadState *thr, void *p, uptr sz, void *pc) { if (thr == 0) // probably before __tsan_init() return; thr->in_rtl++; - MemoryRangeImitateWrite(thr, (uptr)pc, (uptr)p, sz); + MemoryResetRange(thr, (uptr)pc, (uptr)p, sz); thr->in_rtl--; } @@ -157,56 +144,47 @@ void __tsan_free(void *p) { (void)p; } -void __tsan_go_start(int pgoid, int chgoid, void *pc) { - if (chgoid == 0) - return; - AllocGoroutine(chgoid); - ThreadState *thr = goroutines[chgoid]; - 
ThreadState *parent = goroutines[pgoid]; +void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) { + ThreadState *thr = AllocGoroutine(); + *pthr = thr; thr->in_rtl++; parent->in_rtl++; - int goid2 = ThreadCreate(parent, (uptr)pc, 0, true); - ThreadStart(thr, goid2, 0); + int goid = ThreadCreate(parent, (uptr)pc, 0, true); + ThreadStart(thr, goid, 0); parent->in_rtl--; thr->in_rtl--; } -void __tsan_go_end(int goid) { - ThreadState *thr = goroutines[goid]; +void __tsan_go_end(ThreadState *thr) { thr->in_rtl++; ThreadFinish(thr); thr->in_rtl--; internal_free(thr); - goroutines[goid] = 0; } -void __tsan_acquire(int goid, void *addr) { - ThreadState *thr = goroutines[goid]; +void __tsan_acquire(ThreadState *thr, void *addr) { thr->in_rtl++; Acquire(thr, 0, (uptr)addr); thr->in_rtl--; } -void __tsan_release(int goid, void *addr) { - ThreadState *thr = goroutines[goid]; +void __tsan_release(ThreadState *thr, void *addr) { thr->in_rtl++; ReleaseStore(thr, 0, (uptr)addr); thr->in_rtl--; } -void __tsan_release_merge(int goid, void *addr) { - ThreadState *thr = goroutines[goid]; +void __tsan_release_merge(ThreadState *thr, void *addr) { thr->in_rtl++; Release(thr, 0, (uptr)addr); thr->in_rtl--; } -void __tsan_finalizer_goroutine(int goid) { - ThreadState *thr = goroutines[goid]; +void __tsan_finalizer_goroutine(ThreadState *thr) { AcquireGlobal(thr, 0); } -#ifdef _WIN32 +#if SANITIZER_WINDOWS // MinGW gcc emits calls to the function. void ___chkstk_ms(void) { // The implementation must be along the lines of: @@ -242,3 +220,11 @@ void ___chkstk_ms(void) { } // extern "C" } // namespace __tsan + +namespace __sanitizer { + +void SymbolizerPrepareForSandboxing() { + // Nothing to do here for Go. +} + +} // namespace __sanitizer diff --git a/lib/tsan/lit_tests/CMakeLists.txt b/lib/tsan/lit_tests/CMakeLists.txt index ff2508dd75af..53e5015d1bc4 100644 --- a/lib/tsan/lit_tests/CMakeLists.txt +++ b/lib/tsan/lit_tests/CMakeLists.txt @@ -11,9 +11,8 @@ configure_lit_site_cfg( if(COMPILER_RT_CAN_EXECUTE_TESTS) # Run TSan output tests only if we're sure we can produce working binaries. set(TSAN_TEST_DEPS - clang clang-headers FileCheck count not llvm-symbolizer - ${TSAN_RUNTIME_LIBRARIES} - ) + ${SANITIZER_COMMON_LIT_TEST_DEPS} + ${TSAN_RUNTIME_LIBRARIES}) set(TSAN_TEST_PARAMS tsan_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg ) diff --git a/lib/tsan/lit_tests/SharedLibs/lit.local.cfg b/lib/tsan/lit_tests/SharedLibs/lit.local.cfg new file mode 100644 index 000000000000..b3677c17a0f2 --- /dev/null +++ b/lib/tsan/lit_tests/SharedLibs/lit.local.cfg @@ -0,0 +1,4 @@ +# Sources in this directory are compiled as shared libraries and used by +# tests in parent directory. + +config.suffixes = [] diff --git a/lib/tsan/lit_tests/SharedLibs/load_shared_lib-so.cc b/lib/tsan/lit_tests/SharedLibs/load_shared_lib-so.cc new file mode 100644 index 000000000000..d05aa6a40d18 --- /dev/null +++ b/lib/tsan/lit_tests/SharedLibs/load_shared_lib-so.cc @@ -0,0 +1,22 @@ +//===----------- load_shared_lib-so.cc --------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. 
+// +//===----------------------------------------------------------------------===// + +#include <stddef.h> + +int GLOB_SHARED = 0; + +extern "C" +void *write_from_so(void *unused) { + GLOB_SHARED++; + return NULL; +} diff --git a/lib/tsan/lit_tests/Unit/lit.cfg b/lib/tsan/lit_tests/Unit/lit.cfg index 6688697c0c1b..0a0dbbfa5495 100644 --- a/lib/tsan/lit_tests/Unit/lit.cfg +++ b/lib/tsan/lit_tests/Unit/lit.cfg @@ -11,9 +11,8 @@ def get_required_attr(config, attr_name): return attr_value # Setup attributes common for all compiler-rt projects. -llvm_src_root = get_required_attr(config, 'llvm_src_root') -compiler_rt_lit_unit_cfg = os.path.join(llvm_src_root, "projects", - "compiler-rt", "lib", +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_unit_cfg = os.path.join(compiler_rt_src_root, "lib", "lit.common.unit.cfg") lit.load_config(config, compiler_rt_lit_unit_cfg) diff --git a/lib/tsan/lit_tests/Unit/lit.site.cfg.in b/lib/tsan/lit_tests/Unit/lit.site.cfg.in index 23654b9be2ee..6eedc2180876 100644 --- a/lib/tsan/lit_tests/Unit/lit.site.cfg.in +++ b/lib/tsan/lit_tests/Unit/lit.site.cfg.in @@ -1,15 +1,17 @@ ## Autogenerated by LLVM/Clang configuration. # Do not edit! -config.build_type = "@CMAKE_BUILD_TYPE@" config.llvm_obj_root = "@LLVM_BINARY_DIR@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" +config.llvm_build_mode = "@LLVM_BUILD_MODE@" # LLVM tools dir can be passed in lit parameters, so try to # apply substitution. try: config.llvm_tools_dir = config.llvm_tools_dir % lit.params + config.llvm_build_mode = config.llvm_build_mode % lit.params except KeyError,e: key, = e.args lit.fatal("unable to find %r parameter, use '--param=%s=VALUE'" % (key, key)) diff --git a/lib/tsan/lit_tests/aligned_vs_unaligned_race.cc b/lib/tsan/lit_tests/aligned_vs_unaligned_race.cc new file mode 100644 index 000000000000..f4533d08306c --- /dev/null +++ b/lib/tsan/lit_tests/aligned_vs_unaligned_race.cc @@ -0,0 +1,34 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +// Race between an aligned access and an unaligned access, which +// touches the same memory region. +// This is a real race which is not detected by tsan. 
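For reference, a minimal sketch (illustrative only, not part of this commit; all names are made up) of why the two accesses in the test below conflict even though one of them is properly aligned: Thread1 touches bytes [8, 16) of the array, Thread2's unaligned 8-byte access touches bytes [1, 9), and the ranges intersect at byte 8.

    // Hypothetical sketch: compute the byte ranges touched by the two accesses.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t Global[2];
      uintptr_t base = reinterpret_cast<uintptr_t>(&Global[0]);
      uintptr_t a_lo = reinterpret_cast<uintptr_t>(&Global[1]) - base;  // 8
      uintptr_t a_hi = a_lo + sizeof(uint64_t);                         // 16
      uintptr_t b_lo = 1;                                               // unaligned access start
      uintptr_t b_hi = b_lo + sizeof(uint64_t);                         // 9
      bool overlap = a_lo < b_hi && b_lo < a_hi;
      std::printf("overlap: %s\n", overlap ? "yes" : "no");             // prints "yes"
    }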
+// https://code.google.com/p/thread-sanitizer/issues/detail?id=17 +#include <pthread.h> +#include <stdio.h> +#include <stdint.h> + +uint64_t Global[2]; + +void *Thread1(void *x) { + Global[1]++; + return NULL; +} + +void *Thread2(void *x) { + char *p1 = reinterpret_cast<char *>(&Global[0]); + uint64_t *p4 = reinterpret_cast<uint64_t *>(p1 + 1); + (*p4)++; + return NULL; +} + +int main() { + pthread_t t[2]; + pthread_create(&t[0], NULL, Thread1, NULL); + pthread_create(&t[1], NULL, Thread2, NULL); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); + printf("Pass\n"); + // CHECK-NOT: ThreadSanitizer: data race + // CHECK: Pass + return 0; +} diff --git a/lib/tsan/lit_tests/atomic_free.cc b/lib/tsan/lit_tests/atomic_free.cc new file mode 100644 index 000000000000..ba9bd5ac4aed --- /dev/null +++ b/lib/tsan/lit_tests/atomic_free.cc @@ -0,0 +1,19 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <unistd.h> + +void *Thread(void *a) { + __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST); + return 0; +} + +int main() { + int *a = new int(0); + pthread_t t; + pthread_create(&t, 0, Thread, a); + sleep(1); + delete a; + pthread_join(t, 0); +} + +// CHECK: WARNING: ThreadSanitizer: data race diff --git a/lib/tsan/lit_tests/atomic_free2.cc b/lib/tsan/lit_tests/atomic_free2.cc new file mode 100644 index 000000000000..5517bf7ce902 --- /dev/null +++ b/lib/tsan/lit_tests/atomic_free2.cc @@ -0,0 +1,19 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <unistd.h> + +void *Thread(void *a) { + sleep(1); + __atomic_fetch_add((int*)a, 1, __ATOMIC_SEQ_CST); + return 0; +} + +int main() { + int *a = new int(0); + pthread_t t; + pthread_create(&t, 0, Thread, a); + delete a; + pthread_join(t, 0); +} + +// CHECK: WARNING: ThreadSanitizer: heap-use-after-free diff --git a/lib/tsan/lit_tests/atomic_norace.cc b/lib/tsan/lit_tests/atomic_norace.cc new file mode 100644 index 000000000000..265459b0758e --- /dev/null +++ b/lib/tsan/lit_tests/atomic_norace.cc @@ -0,0 +1,61 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <unistd.h> + +const int kTestCount = 4; +typedef long long T; +T atomics[kTestCount * 2]; + +void Test(int test, T *p, bool main_thread) { + volatile T sink; + if (test == 0) { + if (main_thread) + __atomic_fetch_add(p, 1, __ATOMIC_RELAXED); + else + __atomic_fetch_add(p, 1, __ATOMIC_RELAXED); + } else if (test == 1) { + if (main_thread) + __atomic_exchange_n(p, 1, __ATOMIC_ACQ_REL); + else + __atomic_exchange_n(p, 1, __ATOMIC_ACQ_REL); + } else if (test == 2) { + if (main_thread) + sink = __atomic_load_n(p, __ATOMIC_SEQ_CST); + else + __atomic_store_n(p, 1, __ATOMIC_SEQ_CST); + } else if (test == 3) { + if (main_thread) + sink = __atomic_load_n(p, __ATOMIC_SEQ_CST); + else + sink = *p; + } +} + +void *Thread(void *p) { + for (int i = 0; i < kTestCount; i++) { + Test(i, &atomics[i], false); + } + sleep(2); + for (int i = 0; i < kTestCount; i++) { + fprintf(stderr, "Test %d reverse\n", i); + Test(i, &atomics[kTestCount + i], false); + } + return 0; +} + +int main() { + pthread_t t; + pthread_create(&t, 0, Thread, 0); + sleep(1); + for (int i = 0; i < kTestCount; i++) { + fprintf(stderr, "Test %d\n", i); + Test(i, &atomics[i], true); + } + for (int i = 0; i < kTestCount; i++) { + Test(i, &atomics[kTestCount + i], true); + } + pthread_join(t, 0); +} + +// CHECK-NOT: ThreadSanitizer: data race diff --git a/lib/tsan/lit_tests/atomic_race.cc 
b/lib/tsan/lit_tests/atomic_race.cc new file mode 100644 index 000000000000..360b81238889 --- /dev/null +++ b/lib/tsan/lit_tests/atomic_race.cc @@ -0,0 +1,80 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <unistd.h> +#include <stdio.h> + +const int kTestCount = 4; +typedef long long T; +T atomics[kTestCount * 2]; + +void Test(int test, T *p, bool main_thread) { + volatile T sink; + if (test == 0) { + if (main_thread) + __atomic_fetch_add(p, 1, __ATOMIC_RELAXED); + else + *p = 42; + } else if (test == 1) { + if (main_thread) + __atomic_fetch_add(p, 1, __ATOMIC_RELAXED); + else + sink = *p; + } else if (test == 2) { + if (main_thread) + sink = __atomic_load_n(p, __ATOMIC_SEQ_CST); + else + *p = 42; + } else if (test == 3) { + if (main_thread) + __atomic_store_n(p, 1, __ATOMIC_SEQ_CST); + else + sink = *p; + } +} + +void *Thread(void *p) { + for (int i = 0; i < kTestCount; i++) { + Test(i, &atomics[i], false); + } + sleep(2); + for (int i = 0; i < kTestCount; i++) { + fprintf(stderr, "Test %d reverse\n", i); + Test(i, &atomics[kTestCount + i], false); + } + return 0; +} + +int main() { + pthread_t t; + pthread_create(&t, 0, Thread, 0); + sleep(1); + for (int i = 0; i < kTestCount; i++) { + fprintf(stderr, "Test %d\n", i); + Test(i, &atomics[i], true); + } + for (int i = 0; i < kTestCount; i++) { + Test(i, &atomics[kTestCount + i], true); + } + pthread_join(t, 0); +} + +// CHECK: Test 0 +// CHECK: ThreadSanitizer: data race +// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic +// CHECK: Test 1 +// CHECK: ThreadSanitizer: data race +// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic +// CHECK: Test 2 +// CHECK: ThreadSanitizer: data race +// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic +// CHECK: Test 3 +// CHECK: ThreadSanitizer: data race +// CHECK-NOT: SUMMARY{{.*}}tsan_interface_atomic +// CHECK: Test 0 reverse +// CHECK: ThreadSanitizer: data race +// CHECK: Test 1 reverse +// CHECK: ThreadSanitizer: data race +// CHECK: Test 2 reverse +// CHECK: ThreadSanitizer: data race +// CHECK: Test 3 reverse +// CHECK: ThreadSanitizer: data race diff --git a/lib/tsan/lit_tests/atomic_stack.cc b/lib/tsan/lit_tests/atomic_stack.cc new file mode 100644 index 000000000000..50f6a8a889ca --- /dev/null +++ b/lib/tsan/lit_tests/atomic_stack.cc @@ -0,0 +1,29 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <unistd.h> + +int Global; + +void *Thread1(void *x) { + sleep(1); + __atomic_fetch_add(&Global, 1, __ATOMIC_RELAXED); + return NULL; +} + +void *Thread2(void *x) { + Global++; + return NULL; +} + +int main() { + pthread_t t[2]; + pthread_create(&t[0], NULL, Thread1, NULL); + pthread_create(&t[1], NULL, Thread2, NULL); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); +} + +// CHECK: WARNING: ThreadSanitizer: data race +// CHECK: Atomic write of size 4 +// CHECK: #0 __tsan_atomic32_fetch_add +// CHECK: #1 Thread1 diff --git a/lib/tsan/lit_tests/benign_race.cc b/lib/tsan/lit_tests/benign_race.cc new file mode 100644 index 000000000000..a4d4d23c362a --- /dev/null +++ b/lib/tsan/lit_tests/benign_race.cc @@ -0,0 +1,39 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <unistd.h> + +int Global; +int WTFGlobal; + +extern "C" { +void AnnotateBenignRaceSized(const char *f, int l, + void *mem, unsigned int size, const char *desc); +void WTFAnnotateBenignRaceSized(const char *f, int l, + void *mem, unsigned int size, + const char *desc); +} + + +void 
*Thread(void *x) { + Global = 42; + WTFGlobal = 142; + return 0; +} + +int main() { + AnnotateBenignRaceSized(__FILE__, __LINE__, + &Global, sizeof(Global), "Race on Global"); + WTFAnnotateBenignRaceSized(__FILE__, __LINE__, + &WTFGlobal, sizeof(WTFGlobal), + "Race on WTFGlobal"); + pthread_t t; + pthread_create(&t, 0, Thread, 0); + sleep(1); + Global = 43; + WTFGlobal = 143; + pthread_join(t, 0); + printf("OK\n"); +} + +// CHECK-NOT: WARNING: ThreadSanitizer: data race diff --git a/lib/tsan/lit_tests/free_race.c b/lib/tsan/lit_tests/free_race.c index 7a2ec0cdbed0..ff71a4d2116b 100644 --- a/lib/tsan/lit_tests/free_race.c +++ b/lib/tsan/lit_tests/free_race.c @@ -40,4 +40,5 @@ int main() { // CHECK: #1 main // CHECK: Previous write of size 8 at {{.*}} by thread T1{{.*}}: // CHECK: #0 free -// CHECK: #1 Thread1 +// CHECK: #{{(1|2)}} Thread1 +// CHECK: SUMMARY: ThreadSanitizer: heap-use-after-free{{.*}}Thread2 diff --git a/lib/tsan/lit_tests/free_race2.c b/lib/tsan/lit_tests/free_race2.c index 095f82ea0818..f20774b2d8d4 100644 --- a/lib/tsan/lit_tests/free_race2.c +++ b/lib/tsan/lit_tests/free_race2.c @@ -22,5 +22,5 @@ int main() { // CHECK: #1 main // CHECK: Previous write of size 8 at {{.*}} by main thread: // CHECK: #0 free -// CHECK: #1 foo -// CHECK: #2 main +// CHECK: #{{1|2}} foo +// CHECK: #{{2|3}} main diff --git a/lib/tsan/lit_tests/inlined_memcpy_race.cc b/lib/tsan/lit_tests/inlined_memcpy_race.cc new file mode 100644 index 000000000000..6efe5a956e9d --- /dev/null +++ b/lib/tsan/lit_tests/inlined_memcpy_race.cc @@ -0,0 +1,55 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stddef.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> + +int x[4], y[4], z[4]; + +void *MemCpyThread(void *a) { + memcpy((int*)a, z, 16); + return NULL; +} + +void *MemMoveThread(void *a) { + memmove((int*)a, z, 16); + return NULL; +} + +void *MemSetThread(void *a) { + sleep(1); + memset((int*)a, 0, 16); + return NULL; +} + +int main() { + pthread_t t[2]; + // Race on x between memcpy and memset + pthread_create(&t[0], NULL, MemCpyThread, x); + pthread_create(&t[1], NULL, MemSetThread, x); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); + // Race on y between memmove and memset + pthread_create(&t[0], NULL, MemMoveThread, y); + pthread_create(&t[1], NULL, MemSetThread, y); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); + + printf("PASS\n"); + return 0; +} + +// CHECK: WARNING: ThreadSanitizer: data race +// CHECK: #0 memset +// CHECK: #1 MemSetThread +// CHECK: Previous write +// CHECK: #0 memcpy +// CHECK: #1 MemCpyThread + +// CHECK: WARNING: ThreadSanitizer: data race +// CHECK: #0 memset +// CHECK: #1 MemSetThread +// CHECK: Previous write +// CHECK: #0 memmove +// CHECK: #1 MemMoveThread diff --git a/lib/tsan/lit_tests/java.h b/lib/tsan/lit_tests/java.h index 7d61f5802864..04094197edb7 100644 --- a/lib/tsan/lit_tests/java.h +++ b/lib/tsan/lit_tests/java.h @@ -14,4 +14,6 @@ void __tsan_java_mutex_lock(jptr addr); void __tsan_java_mutex_unlock(jptr addr); void __tsan_java_mutex_read_lock(jptr addr); void __tsan_java_mutex_read_unlock(jptr addr); +void __tsan_java_mutex_lock_rec(jptr addr, int rec); +int __tsan_java_mutex_unlock_rec(jptr addr); } diff --git a/lib/tsan/lit_tests/java_lock.cc b/lib/tsan/lit_tests/java_lock.cc index f66f1e7097fa..d9db103504de 100644 --- a/lib/tsan/lit_tests/java_lock.cc +++ b/lib/tsan/lit_tests/java_lock.cc @@ -1,10 +1,12 @@ // RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s #include 
"java.h" +#include <unistd.h> jptr varaddr; jptr lockaddr; void *Thread(void *p) { + sleep(1); __tsan_java_mutex_lock(lockaddr); *(int*)varaddr = 42; __tsan_java_mutex_unlock(lockaddr); diff --git a/lib/tsan/lit_tests/java_lock_rec.cc b/lib/tsan/lit_tests/java_lock_rec.cc new file mode 100644 index 000000000000..5cc80d4a33ef --- /dev/null +++ b/lib/tsan/lit_tests/java_lock_rec.cc @@ -0,0 +1,54 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include "java.h" +#include <unistd.h> + +jptr varaddr; +jptr lockaddr; + +void *Thread(void *p) { + __tsan_java_mutex_lock(lockaddr); + __tsan_java_mutex_lock(lockaddr); + *(int*)varaddr = 42; + int rec = __tsan_java_mutex_unlock_rec(lockaddr); + if (rec != 2) { + printf("FAILED 0 rec=%d\n", rec); + exit(1); + } + sleep(2); + __tsan_java_mutex_lock_rec(lockaddr, rec); + if (*(int*)varaddr != 43) { + printf("FAILED 3 var=%d\n", *(int*)varaddr); + exit(1); + } + __tsan_java_mutex_unlock(lockaddr); + __tsan_java_mutex_unlock(lockaddr); + return 0; +} + +int main() { + int const kHeapSize = 1024 * 1024; + void *jheap = malloc(kHeapSize); + __tsan_java_init((jptr)jheap, kHeapSize); + const int kBlockSize = 16; + __tsan_java_alloc((jptr)jheap, kBlockSize); + varaddr = (jptr)jheap; + *(int*)varaddr = 0; + lockaddr = (jptr)jheap + 8; + pthread_t th; + pthread_create(&th, 0, Thread, 0); + sleep(1); + __tsan_java_mutex_lock(lockaddr); + if (*(int*)varaddr != 42) { + printf("FAILED 1 var=%d\n", *(int*)varaddr); + exit(1); + } + *(int*)varaddr = 43; + __tsan_java_mutex_unlock(lockaddr); + pthread_join(th, 0); + __tsan_java_free((jptr)jheap, kBlockSize); + printf("OK\n"); + return __tsan_java_fini(); +} + +// CHECK-NOT: WARNING: ThreadSanitizer: data race +// CHECK-NOT: FAILED diff --git a/lib/tsan/lit_tests/java_lock_rec_race.cc b/lib/tsan/lit_tests/java_lock_rec_race.cc new file mode 100644 index 000000000000..61626aaddc0d --- /dev/null +++ b/lib/tsan/lit_tests/java_lock_rec_race.cc @@ -0,0 +1,48 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include "java.h" +#include <unistd.h> + +jptr varaddr; +jptr lockaddr; + +void *Thread(void *p) { + __tsan_java_mutex_lock(lockaddr); + __tsan_java_mutex_lock(lockaddr); + __tsan_java_mutex_lock(lockaddr); + int rec = __tsan_java_mutex_unlock_rec(lockaddr); + if (rec != 3) { + printf("FAILED 0 rec=%d\n", rec); + exit(1); + } + *(int*)varaddr = 42; + sleep(2); + __tsan_java_mutex_lock_rec(lockaddr, rec); + __tsan_java_mutex_unlock(lockaddr); + __tsan_java_mutex_unlock(lockaddr); + __tsan_java_mutex_unlock(lockaddr); + return 0; +} + +int main() { + int const kHeapSize = 1024 * 1024; + void *jheap = malloc(kHeapSize); + __tsan_java_init((jptr)jheap, kHeapSize); + const int kBlockSize = 16; + __tsan_java_alloc((jptr)jheap, kBlockSize); + varaddr = (jptr)jheap; + *(int*)varaddr = 0; + lockaddr = (jptr)jheap + 8; + pthread_t th; + pthread_create(&th, 0, Thread, 0); + sleep(1); + __tsan_java_mutex_lock(lockaddr); + *(int*)varaddr = 43; + __tsan_java_mutex_unlock(lockaddr); + pthread_join(th, 0); + __tsan_java_free((jptr)jheap, kBlockSize); + printf("OK\n"); + return __tsan_java_fini(); +} + +// CHECK: WARNING: ThreadSanitizer: data race +// CHECK-NOT: FAILED diff --git a/lib/tsan/lit_tests/java_rwlock.cc b/lib/tsan/lit_tests/java_rwlock.cc index 1e8940afd7d0..d1f38733ba03 100644 --- a/lib/tsan/lit_tests/java_rwlock.cc +++ b/lib/tsan/lit_tests/java_rwlock.cc @@ -1,10 +1,12 @@ // RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s #include "java.h" +#include <unistd.h> jptr varaddr; jptr 
lockaddr; void *Thread(void *p) { + sleep(1); __tsan_java_mutex_read_lock(lockaddr); *(int*)varaddr = 42; __tsan_java_mutex_read_unlock(lockaddr); diff --git a/lib/tsan/lit_tests/lit.cfg b/lib/tsan/lit_tests/lit.cfg index 7e2db7b8fd0b..d483d2fcbdc6 100644 --- a/lib/tsan/lit_tests/lit.cfg +++ b/lib/tsan/lit_tests/lit.cfg @@ -2,6 +2,14 @@ import os +def get_required_attr(config, attr_name): + attr_value = getattr(config, attr_name, None) + if not attr_value: + lit.fatal("No attribute %r in test configuration! You may need to run " + "tests from your build directory or add this attribute " + "to lit.site.cfg " % attr_name) + return attr_value + # Setup config name. config.name = 'ThreadSanitizer' @@ -30,14 +38,6 @@ if llvm_src_root is None: if not llvm_config: DisplayNoConfigMessage() - # Validate that llvm-config points to the same source tree. - llvm_src_root = lit.util.capture(["llvm-config", "--src-root"]).strip() - tsan_test_src_root = os.path.join(llvm_src_root, "projects", "compiler-rt", - "lib", "tsan", "lit_tests") - if (os.path.realpath(tsan_test_src_root) != - os.path.realpath(config.test_source_root)): - DisplayNoConfigMessage() - # Find out the presumed location of generated site config. llvm_obj_root = lit.util.capture(["llvm-config", "--obj-root"]).strip() tsan_site_cfg = os.path.join(llvm_obj_root, "projects", "compiler-rt", @@ -49,8 +49,9 @@ if llvm_src_root is None: raise SystemExit # Setup attributes common for all compiler-rt projects. -compiler_rt_lit_cfg = os.path.join(llvm_src_root, "projects", "compiler-rt", - "lib", "lit.common.cfg") +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_cfg = os.path.join(compiler_rt_src_root, "lib", + "lit.common.cfg") if (not compiler_rt_lit_cfg) or (not os.path.exists(compiler_rt_lit_cfg)): lit.fatal("Can't find common compiler-rt lit config at: %r" % compiler_rt_lit_cfg) @@ -69,11 +70,8 @@ config.environment['TSAN_OPTIONS'] = tsan_options # Setup default compiler flags used with -fsanitize=thread option. # FIXME: Review the set of required flags and check if it can be reduced. clang_tsan_cflags = ("-fsanitize=thread " - + "-fPIE " - + "-fno-builtin " + "-g " + "-Wall " - + "-pie " + "-lpthread " + "-ldl ") clang_tsan_cxxflags = "-ccc-cxx " + clang_tsan_cflags diff --git a/lib/tsan/lit_tests/lit.site.cfg.in b/lib/tsan/lit_tests/lit.site.cfg.in index b1c6ccf544ea..07b521af061f 100644 --- a/lib/tsan/lit_tests/lit.site.cfg.in +++ b/lib/tsan/lit_tests/lit.site.cfg.in @@ -4,6 +4,7 @@ config.clang = "@LLVM_BINARY_DIR@/bin/clang" config.host_os = "@HOST_OS@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" config.target_triple = "@TARGET_TRIPLE@" diff --git a/lib/tsan/lit_tests/load_shared_lib.cc b/lib/tsan/lit_tests/load_shared_lib.cc new file mode 100644 index 000000000000..dd6fa0964f4a --- /dev/null +++ b/lib/tsan/lit_tests/load_shared_lib.cc @@ -0,0 +1,44 @@ +// Check that if the list of shared libraries changes between the two race +// reports, the second report occurring in a new shared library is still +// symbolized correctly. 
+ +// RUN: %clangxx_tsan -O1 %p/SharedLibs/load_shared_lib-so.cc \ +// RUN: -fPIC -shared -o %t-so.so +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s + +#include <dlfcn.h> +#include <pthread.h> +#include <stdio.h> + +#include <string> + +int GLOB = 0; + +void *write_glob(void *unused) { + GLOB++; + return NULL; +} + +void race_two_threads(void *(*access_callback)(void *unused)) { + pthread_t t1, t2; + pthread_create(&t1, NULL, access_callback, NULL); + pthread_create(&t2, NULL, access_callback, NULL); + pthread_join(t1, NULL); + pthread_join(t2, NULL); +} + +int main(int argc, char *argv[]) { + std::string path = std::string(argv[0]) + std::string("-so.so"); + race_two_threads(write_glob); + // CHECK: write_glob + void *lib = dlopen(path.c_str(), RTLD_NOW); + if (!lib) { + printf("error in dlopen(): %s\n", dlerror()); + return 1; + } + void *(*write_from_so)(void *unused); + *(void **)&write_from_so = dlsym(lib, "write_from_so"); + race_two_threads(write_from_so); + // CHECK: write_from_so + return 0; +} diff --git a/lib/tsan/lit_tests/longjmp.cc b/lib/tsan/lit_tests/longjmp.cc new file mode 100644 index 000000000000..d9ca4ca5e6e9 --- /dev/null +++ b/lib/tsan/lit_tests/longjmp.cc @@ -0,0 +1,22 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <stdio.h> +#include <stdlib.h> +#include <setjmp.h> + +int foo(jmp_buf env) { + longjmp(env, 42); +} + +int main() { + jmp_buf env; + if (setjmp(env) == 42) { + printf("JUMPED\n"); + return 0; + } + foo(env); + printf("FAILED\n"); + return 0; +} + +// CHECK-NOT: FAILED +// CHECK: JUMPED diff --git a/lib/tsan/lit_tests/longjmp2.cc b/lib/tsan/lit_tests/longjmp2.cc new file mode 100644 index 000000000000..0d551fa19d94 --- /dev/null +++ b/lib/tsan/lit_tests/longjmp2.cc @@ -0,0 +1,24 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <stdio.h> +#include <stdlib.h> +#include <setjmp.h> + +int foo(sigjmp_buf env) { + printf("env=%p\n", env); + siglongjmp(env, 42); +} + +int main() { + sigjmp_buf env; + printf("env=%p\n", env); + if (sigsetjmp(env, 1) == 42) { + printf("JUMPED\n"); + return 0; + } + foo(env); + printf("FAILED\n"); + return 0; +} + +// CHECK-NOT: FAILED +// CHECK: JUMPED diff --git a/lib/tsan/lit_tests/longjmp3.cc b/lib/tsan/lit_tests/longjmp3.cc new file mode 100644 index 000000000000..87fabd0b3be2 --- /dev/null +++ b/lib/tsan/lit_tests/longjmp3.cc @@ -0,0 +1,48 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <setjmp.h> + +void bar(jmp_buf env) { + volatile int x = 42; + longjmp(env, 42); + x++; +} + +void foo(jmp_buf env) { + volatile int x = 42; + bar(env); + x++; +} + +void badguy() { + pthread_mutex_t mtx; + pthread_mutex_init(&mtx, 0); + pthread_mutex_lock(&mtx); + pthread_mutex_destroy(&mtx); +} + +void mymain() { + jmp_buf env; + if (setjmp(env) == 42) { + badguy(); + return; + } + foo(env); + printf("FAILED\n"); +} + +int main() { + volatile int x = 42; + mymain(); + return x; +} + +// CHECK-NOT: FAILED +// CHECK: WARNING: ThreadSanitizer: destroy of a locked mutex +// CHECK: #0 pthread_mutex_destroy +// CHECK: #1 badguy +// CHECK: #2 mymain +// CHECK: #3 main + diff --git a/lib/tsan/lit_tests/longjmp4.cc b/lib/tsan/lit_tests/longjmp4.cc new file mode 100644 index 000000000000..a8764dda5a6b --- /dev/null +++ b/lib/tsan/lit_tests/longjmp4.cc @@ -0,0 +1,51 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include 
<setjmp.h> +#include <string.h> + +void bar(jmp_buf env) { + volatile int x = 42; + jmp_buf env2; + memcpy(env2, env, sizeof(jmp_buf)); + longjmp(env2, 42); + x++; +} + +void foo(jmp_buf env) { + volatile int x = 42; + bar(env); + x++; +} + +void badguy() { + pthread_mutex_t mtx; + pthread_mutex_init(&mtx, 0); + pthread_mutex_lock(&mtx); + pthread_mutex_destroy(&mtx); +} + +void mymain() { + jmp_buf env; + if (setjmp(env) == 42) { + badguy(); + return; + } + foo(env); + printf("FAILED\n"); +} + +int main() { + volatile int x = 42; + mymain(); + return x; +} + +// CHECK-NOT: FAILED +// CHECK: WARNING: ThreadSanitizer: destroy of a locked mutex +// CHECK: #0 pthread_mutex_destroy +// CHECK: #1 badguy +// CHECK: #2 mymain +// CHECK: #3 main + diff --git a/lib/tsan/lit_tests/malloc_overflow.cc b/lib/tsan/lit_tests/malloc_overflow.cc new file mode 100644 index 000000000000..19423c5f93f1 --- /dev/null +++ b/lib/tsan/lit_tests/malloc_overflow.cc @@ -0,0 +1,22 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <stdio.h> +#include <stdlib.h> + +int main() { + void *p = malloc((size_t)-1); + if (p != 0) + printf("FAIL malloc(-1) = %p\n", p); + p = malloc((size_t)-1 / 2); + if (p != 0) + printf("FAIL malloc(-1/2) = %p\n", p); + p = calloc((size_t)-1, (size_t)-1); + if (p != 0) + printf("FAIL calloc(-1, -1) = %p\n", p); + p = calloc((size_t)-1 / 2, (size_t)-1 / 2); + if (p != 0) + printf("FAIL calloc(-1/2, -1/2) = %p\n", p); + printf("OK\n"); +} + +// CHECK-NOT: FAIL +// CHECK-NOT: failed to allocate diff --git a/lib/tsan/lit_tests/malloc_stack.cc b/lib/tsan/lit_tests/malloc_stack.cc new file mode 100644 index 000000000000..c185623ff5ca --- /dev/null +++ b/lib/tsan/lit_tests/malloc_stack.cc @@ -0,0 +1,25 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <unistd.h> + +_Atomic(int*) p; + +void *thr(void *a) { + sleep(1); + int *pp = __c11_atomic_load(&p, __ATOMIC_RELAXED); + *pp = 42; + return 0; +} + +int main() { + pthread_t th; + pthread_create(&th, 0, thr, p); + __c11_atomic_store(&p, new int, __ATOMIC_RELAXED); + pthread_join(th, 0); +} + +// CHECK: data race +// CHECK: Previous write +// CHECK: #0 operator new +// CHECK: Location is heap block +// CHECK: #0 operator new diff --git a/lib/tsan/lit_tests/memcpy_race.cc b/lib/tsan/lit_tests/memcpy_race.cc index 806740dda241..857728ba0540 100644 --- a/lib/tsan/lit_tests/memcpy_race.cc +++ b/lib/tsan/lit_tests/memcpy_race.cc @@ -10,13 +10,15 @@ char *data1 = new char[10]; char *data2 = new char[10]; void *Thread1(void *x) { - memcpy(data+5, data1, 1); + static volatile int size = 1; + memcpy(data+5, data1, size); return NULL; } void *Thread2(void *x) { + static volatile int size = 4; sleep(1); - memcpy(data+3, data2, 4); + memcpy(data+3, data2, size); return NULL; } diff --git a/lib/tsan/lit_tests/mutex_destroy_locked.cc b/lib/tsan/lit_tests/mutex_destroy_locked.cc index 991eaf5426e2..27a04248b172 100644 --- a/lib/tsan/lit_tests/mutex_destroy_locked.cc +++ b/lib/tsan/lit_tests/mutex_destroy_locked.cc @@ -19,3 +19,4 @@ int main() { // CHECK: Mutex {{.*}} created at: // CHECK: #0 pthread_mutex_init // CHECK: #1 main +// CHECK: SUMMARY: ThreadSanitizer: destroy of a locked mutex{{.*}}main diff --git a/lib/tsan/lit_tests/mutexset7.cc b/lib/tsan/lit_tests/mutexset7.cc index 141bde2b5015..3ec1b5202983 100644 --- a/lib/tsan/lit_tests/mutexset7.cc +++ b/lib/tsan/lit_tests/mutexset7.cc @@ -4,6 +4,7 @@ #include <unistd.h> int Global; +__thread int huge[1024*1024]; void *Thread1(void *x) { 
sleep(1); diff --git a/lib/tsan/lit_tests/mutexset8.cc b/lib/tsan/lit_tests/mutexset8.cc new file mode 100644 index 000000000000..6db63f7d16db --- /dev/null +++ b/lib/tsan/lit_tests/mutexset8.cc @@ -0,0 +1,39 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <unistd.h> + +int Global; +pthread_mutex_t *mtx; + +void *Thread1(void *x) { + sleep(1); + pthread_mutex_lock(mtx); + Global++; + pthread_mutex_unlock(mtx); + return NULL; +} + +void *Thread2(void *x) { + Global--; + return NULL; +} + +int main() { + // CHECK: WARNING: ThreadSanitizer: data race + // CHECK: Write of size 4 at {{.*}} by thread T1 + // CHECK: (mutexes: write [[M1:M[0-9]+]]): + // CHECK: Previous write of size 4 at {{.*}} by thread T2: + // CHECK: Mutex [[M1]] created at: + // CHECK: #0 pthread_mutex_init + // CHECK: #1 main {{.*}}/mutexset8.cc + mtx = new pthread_mutex_t; + pthread_mutex_init(mtx, 0); + pthread_t t[2]; + pthread_create(&t[0], NULL, Thread1, NULL); + pthread_create(&t[1], NULL, Thread2, NULL); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); + pthread_mutex_destroy(mtx); + delete mtx; +} diff --git a/lib/tsan/lit_tests/oob_race.cc b/lib/tsan/lit_tests/oob_race.cc new file mode 100644 index 000000000000..2e7f0593fd8d --- /dev/null +++ b/lib/tsan/lit_tests/oob_race.cc @@ -0,0 +1,24 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> + +const long kOffset = 64*1024; + +void *Thread(void *p) { + ((char*)p)[-kOffset] = 43; + return 0; +} + +int main() { + char *volatile p0 = new char[16]; + delete[] p0; + char *p = new char[32]; + pthread_t th; + pthread_create(&th, 0, Thread, p); + p[-kOffset] = 42; + pthread_join(th, 0); +} + +// Used to crash with CHECK failed. +// CHECK: WARNING: ThreadSanitizer: data race + diff --git a/lib/tsan/lit_tests/race_on_heap.cc b/lib/tsan/lit_tests/race_on_heap.cc index dc679e8bf3f9..35434eac1850 100644 --- a/lib/tsan/lit_tests/race_on_heap.cc +++ b/lib/tsan/lit_tests/race_on_heap.cc @@ -39,8 +39,8 @@ int main() { // ... // CHECK: Location is heap block of size 99 at [[ADDR]] allocated by thread T1: // CHCEKL #0 malloc -// CHECK: #1 alloc -// CHECK: #2 AllocThread +// CHECK: #{{1|2}} alloc +// CHECK: #{{2|3}} AllocThread // ... 
// CHECK: Thread T1 (tid={{.*}}, finished) created by main thread at: // CHECK: #0 pthread_create diff --git a/lib/tsan/lit_tests/race_on_mutex.c b/lib/tsan/lit_tests/race_on_mutex.c index de1c2d4160a6..aff32f9bb1a2 100644 --- a/lib/tsan/lit_tests/race_on_mutex.c +++ b/lib/tsan/lit_tests/race_on_mutex.c @@ -34,7 +34,7 @@ int main() { } // CHECK: WARNING: ThreadSanitizer: data race -// CHECK-NEXT: Read of size 1 at {{.*}} by thread T2: +// CHECK-NEXT: Atomic read of size 1 at {{.*}} by thread T2: // CHECK-NEXT: #0 pthread_mutex_lock // CHECK-NEXT: #1 Thread2{{.*}} {{.*}}race_on_mutex.c:20{{(:3)?}} ({{.*}}) // CHECK: Previous write of size 1 at {{.*}} by thread T1: diff --git a/lib/tsan/lit_tests/race_on_mutex2.c b/lib/tsan/lit_tests/race_on_mutex2.c new file mode 100644 index 000000000000..84bef75a3449 --- /dev/null +++ b/lib/tsan/lit_tests/race_on_mutex2.c @@ -0,0 +1,24 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <stddef.h> +#include <unistd.h> + +void *Thread(void *x) { + pthread_mutex_lock((pthread_mutex_t*)x); + pthread_mutex_unlock((pthread_mutex_t*)x); + return 0; +} + +int main() { + pthread_mutex_t Mtx; + pthread_mutex_init(&Mtx, 0); + pthread_t t; + pthread_create(&t, 0, Thread, &Mtx); + sleep(1); + pthread_mutex_destroy(&Mtx); + pthread_join(t, 0); + return 0; +} + +// CHECK: WARNING: ThreadSanitizer: data race diff --git a/lib/tsan/lit_tests/race_on_write.cc b/lib/tsan/lit_tests/race_on_write.cc new file mode 100644 index 000000000000..f1b0bb1cbd6e --- /dev/null +++ b/lib/tsan/lit_tests/race_on_write.cc @@ -0,0 +1,39 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> + +int fd; +char buf; + +void *Thread1(void *x) { + buf = 1; + sleep(1); + return NULL; +} + +void *Thread2(void *x) { + write(fd, &buf, 1); + return NULL; +} + +int main() { + fd = open("/dev/null", O_WRONLY); + if (fd < 0) return 1; + pthread_t t[2]; + pthread_create(&t[0], NULL, Thread1, NULL); + sleep(1); + pthread_create(&t[1], NULL, Thread2, NULL); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); + close(fd); +} + +// CHECK: WARNING: ThreadSanitizer: data race +// CHECK: Read of size 1 +// CHECK: #0 write +// CHECK: Previous write of size 1 +// CHECK: #0 Thread1 diff --git a/lib/tsan/lit_tests/signal_errno.cc b/lib/tsan/lit_tests/signal_errno.cc index af9ccce9045a..8181555f6f63 100644 --- a/lib/tsan/lit_tests/signal_errno.cc +++ b/lib/tsan/lit_tests/signal_errno.cc @@ -10,7 +10,7 @@ pthread_t mainth; volatile int done; -static void handler(int, siginfo_t *s, void *c) { +static void MyHandler(int, siginfo_t *s, void *c) { errno = 1; done = 1; } @@ -23,7 +23,7 @@ static void* sendsignal(void *p) { int main() { mainth = pthread_self(); struct sigaction act = {}; - act.sa_sigaction = &handler; + act.sa_sigaction = &MyHandler; sigaction(SIGPROF, &act, 0); pthread_t th; pthread_create(&th, 0, sendsignal, 0); @@ -38,5 +38,6 @@ int main() { } // CHECK: WARNING: ThreadSanitizer: signal handler spoils errno -// CHECK: #0 handler(int, siginfo*, void*) {{.*}}signal_errno.cc +// CHECK: #0 MyHandler(int, siginfo{{(_t)?}}*, void*) {{.*}}signal_errno.cc +// CHECK: SUMMARY: ThreadSanitizer: signal handler spoils errno{{.*}}MyHandler diff --git a/lib/tsan/lit_tests/signal_malloc.cc b/lib/tsan/lit_tests/signal_malloc.cc index cee997cdb763..4dbc2f78ab17 100644 --- a/lib/tsan/lit_tests/signal_malloc.cc +++ 
b/lib/tsan/lit_tests/signal_malloc.cc @@ -8,7 +8,8 @@ static void handler(int, siginfo_t*, void*) { // CHECK: WARNING: ThreadSanitizer: signal-unsafe call inside of a signal // CHECK: #0 malloc - // CHECK: #1 handler(int, siginfo*, void*) {{.*}}signal_malloc.cc:[[@LINE+1]] + // CHECK: #{{(1|2)}} handler(int, siginfo{{(_t)?}}*, void*) {{.*}}signal_malloc.cc:[[@LINE+2]] + // CHECK: SUMMARY: ThreadSanitizer: signal-unsafe call inside of a signal{{.*}}handler volatile char *p = (char*)malloc(1); p[0] = 0; free((void*)p); diff --git a/lib/tsan/lit_tests/simple_race.cc b/lib/tsan/lit_tests/simple_race.cc index ec29c92ee1a8..99cf228ac2f2 100644 --- a/lib/tsan/lit_tests/simple_race.cc +++ b/lib/tsan/lit_tests/simple_race.cc @@ -23,3 +23,4 @@ int main() { } // CHECK: WARNING: ThreadSanitizer: data race +// CHECK: SUMMARY: ThreadSanitizer: data race{{.*}}Thread diff --git a/lib/tsan/lit_tests/test_output.sh b/lib/tsan/lit_tests/test_output.sh index d21c9a797ad3..1eedf6eb20a3 100755 --- a/lib/tsan/lit_tests/test_output.sh +++ b/lib/tsan/lit_tests/test_output.sh @@ -6,12 +6,13 @@ set -e # fail on any error ROOTDIR=$(dirname $0)/.. BLACKLIST=$ROOTDIR/lit_tests/Helpers/blacklist.txt -# Assuming clang is in path. -CC=clang -CXX=clang++ +# Assume clang and clang++ are in path. +: ${CC:=clang} +: ${CXX:=clang++} +: ${FILECHECK:=FileCheck} # TODO: add testing for all of -O0...-O3 -CFLAGS="-fsanitize=thread -fsanitize-blacklist=$BLACKLIST -fPIE -O1 -g -fno-builtin -Wall" +CFLAGS="-fsanitize=thread -fsanitize-blacklist=$BLACKLIST -fPIE -O1 -g -Wall" LDFLAGS="-pie -lpthread -ldl $ROOTDIR/rtl/libtsan.a" test_file() { @@ -23,7 +24,7 @@ test_file() { $COMPILER $SRC $CFLAGS -c -o $OBJ $COMPILER $OBJ $LDFLAGS -o $EXE RES=$($EXE 2>&1 || true) - printf "%s\n" "$RES" | FileCheck $SRC + printf "%s\n" "$RES" | $FILECHECK $SRC if [ "$3" == "" ]; then rm -f $EXE $OBJ fi @@ -35,6 +36,10 @@ if [ "$1" == "" ]; then echo SKIPPING FAILING TEST $c continue fi + if [[ $c == */load_shared_lib.cc ]]; then + echo TEST $c is not supported + continue + fi COMPILER=$CXX case $c in *.c) COMPILER=$CC diff --git a/lib/tsan/lit_tests/thread_end_with_ignore.cc b/lib/tsan/lit_tests/thread_end_with_ignore.cc new file mode 100644 index 000000000000..960a477c5ad3 --- /dev/null +++ b/lib/tsan/lit_tests/thread_end_with_ignore.cc @@ -0,0 +1,19 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> + +extern "C" void AnnotateIgnoreReadsBegin(const char *f, int l); + +void *Thread(void *x) { + AnnotateIgnoreReadsBegin("", 0); + return 0; +} + +int main() { + pthread_t t; + pthread_create(&t, 0, Thread, 0); + pthread_join(t, 0); +} + +// CHECK: ThreadSanitizer: thread T1 finished with ignores enabled + diff --git a/lib/tsan/lit_tests/thread_end_with_ignore2.cc b/lib/tsan/lit_tests/thread_end_with_ignore2.cc new file mode 100644 index 000000000000..8f743ae2f4a4 --- /dev/null +++ b/lib/tsan/lit_tests/thread_end_with_ignore2.cc @@ -0,0 +1,9 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +extern "C" void AnnotateIgnoreWritesBegin(const char *f, int l); + +int main() { + AnnotateIgnoreWritesBegin("", 0); +} + +// CHECK: ThreadSanitizer: thread T0 finished with ignores enabled + diff --git a/lib/tsan/lit_tests/thread_leak3.c b/lib/tsan/lit_tests/thread_leak3.c index c48219fe73fa..3577164cad4a 100644 --- a/lib/tsan/lit_tests/thread_leak3.c +++ b/lib/tsan/lit_tests/thread_leak3.c @@ -1,5 +1,6 @@ // RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s #include <pthread.h> +#include 
<unistd.h> void *Thread(void *x) { return 0; @@ -8,7 +9,9 @@ void *Thread(void *x) { int main() { pthread_t t; pthread_create(&t, 0, Thread, 0); + sleep(1); return 0; } // CHECK: WARNING: ThreadSanitizer: thread leak +// CHECK: SUMMARY: ThreadSanitizer: thread leak{{.*}}main diff --git a/lib/tsan/lit_tests/thread_leak4.c b/lib/tsan/lit_tests/thread_leak4.c new file mode 100644 index 000000000000..f9fad0360d34 --- /dev/null +++ b/lib/tsan/lit_tests/thread_leak4.c @@ -0,0 +1,18 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <unistd.h> +#include <stdio.h> + +void *Thread(void *x) { + sleep(10); + return 0; +} + +int main() { + pthread_t t; + pthread_create(&t, 0, Thread, 0); + printf("OK\n"); + return 0; +} + +// CHECK-NOT: WARNING: ThreadSanitizer: thread leak diff --git a/lib/tsan/lit_tests/thread_leak5.c b/lib/tsan/lit_tests/thread_leak5.c new file mode 100644 index 000000000000..fc72b149ec25 --- /dev/null +++ b/lib/tsan/lit_tests/thread_leak5.c @@ -0,0 +1,19 @@ +// RUN: %clang_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <unistd.h> + +void *Thread(void *x) { + return 0; +} + +int main() { + for (int i = 0; i < 5; i++) { + pthread_t t; + pthread_create(&t, 0, Thread, 0); + } + sleep(1); + return 0; +} + +// CHECK: WARNING: ThreadSanitizer: thread leak +// CHECK: And 4 more similar thread leaks diff --git a/lib/tsan/lit_tests/thread_name.cc b/lib/tsan/lit_tests/thread_name.cc index 0ca0b1769976..37f308ffbc0c 100644 --- a/lib/tsan/lit_tests/thread_name.cc +++ b/lib/tsan/lit_tests/thread_name.cc @@ -15,7 +15,11 @@ void *Thread1(void *x) { } void *Thread2(void *x) { +#if SANITIZER_LINUX && __GLIBC_PREREQ(2, 12) pthread_setname_np(pthread_self(), "Thread2"); +#else + AnnotateThreadName(__FILE__, __LINE__, "Thread2"); +#endif Global--; return NULL; } diff --git a/lib/tsan/lit_tests/tsan-vs-gvn.cc b/lib/tsan/lit_tests/tsan-vs-gvn.cc new file mode 100644 index 000000000000..40ae724b78e1 --- /dev/null +++ b/lib/tsan/lit_tests/tsan-vs-gvn.cc @@ -0,0 +1,38 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +// RUN: %clangxx_tsan -O2 %s -o %t && %t 2>&1 | FileCheck %s +// RUN: %clangxx_tsan -O3 %s -o %t && %t 2>&1 | FileCheck %s +// +// Check that load widening is not tsan-hostile. 
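As a rough illustration of the transformation this test guards against (a hypothetical sketch, not code from this commit; S_t, sum_narrow and sum_widened are invented names, and a little-endian layout is assumed): an optimizer might fuse the two byte loads of c1 and c3 into a single 4-byte load that also reads c2, which would then appear to race with a concurrent store to c2, so a TSan-instrumented build must keep the loads narrow.

    #include <cstdio>
    #include <cstring>

    struct S_t { int i; char c1, c2, c3, c4; };

    // Two 1-byte loads: c2 is never read, so a concurrent write to c2 is fine.
    int sum_narrow(const S_t *s) {
      return s->c1 + s->c3;
    }

    // A widened form an optimizer might be tempted to emit: one 4-byte load
    // covering c1..c4 (little-endian assumed). Reading c2 here would be
    // reported as a race with a concurrent store to c2.
    int sum_widened(const S_t *s) {
      unsigned v;
      std::memcpy(&v, &s->c1, sizeof(v));
      return (char)(v & 0xffu) + (char)((v >> 16) & 0xffu);
    }

    int main() {
      S_t s = {0, 1, 2, 3, 4};
      std::printf("%d %d\n", sum_narrow(&s), sum_widened(&s));  // prints "4 4"
    }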
+#include <pthread.h> +#include <stdio.h> +#include <string.h> + +struct { + int i; + char c1, c2, c3, c4; +} S; + +int G; + +void *Thread1(void *x) { + G = S.c1 + S.c3; + return NULL; +} + +void *Thread2(void *x) { + S.c2 = 1; + return NULL; +} + +int main() { + pthread_t t[2]; + memset(&S, 123, sizeof(S)); + pthread_create(&t[0], NULL, Thread1, NULL); + pthread_create(&t[1], NULL, Thread2, NULL); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); + printf("PASS\n"); +} + +// CHECK-NOT: WARNING: ThreadSanitizer: data race +// CHECK: PASS diff --git a/lib/tsan/lit_tests/unaligned_norace.cc b/lib/tsan/lit_tests/unaligned_norace.cc new file mode 100644 index 000000000000..792224b80126 --- /dev/null +++ b/lib/tsan/lit_tests/unaligned_norace.cc @@ -0,0 +1,84 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> + +uint64_t objs[8*3*3*2][3]; + +extern "C" { +uint16_t __tsan_unaligned_read2(void *addr); +uint32_t __tsan_unaligned_read4(void *addr); +uint64_t __tsan_unaligned_read8(void *addr); +void __tsan_unaligned_write2(void *addr, uint16_t v); +void __tsan_unaligned_write4(void *addr, uint32_t v); +void __tsan_unaligned_write8(void *addr, uint64_t v); +} + +static void access(char *p, int sz, int rw) { + if (rw) { + switch (sz) { + case 0: __tsan_unaligned_write2(p, 0); break; + case 1: __tsan_unaligned_write4(p, 0); break; + case 2: __tsan_unaligned_write8(p, 0); break; + default: exit(1); + } + } else { + switch (sz) { + case 0: __tsan_unaligned_read2(p); break; + case 1: __tsan_unaligned_read4(p); break; + case 2: __tsan_unaligned_read8(p); break; + default: exit(1); + } + } +} + +static int accesssize(int sz) { + switch (sz) { + case 0: return 2; + case 1: return 4; + case 2: return 8; + } + exit(1); +} + +void Test(bool main) { + uint64_t *obj = objs[0]; + for (int off = 0; off < 8; off++) { + for (int sz1 = 0; sz1 < 3; sz1++) { + for (int sz2 = 0; sz2 < 3; sz2++) { + for (int rw = 0; rw < 2; rw++) { + char *p = (char*)obj + off; + if (main) { + // printf("thr=%d off=%d sz1=%d sz2=%d rw=%d p=%p\n", + // main, off, sz1, sz2, rw, p); + access(p, sz1, true); + } else { + p += accesssize(sz1); + // printf("thr=%d off=%d sz1=%d sz2=%d rw=%d p=%p\n", + // main, off, sz1, sz2, rw, p); + access(p, sz2, rw); + } + obj += 3; + } + } + } + } +} + +void *Thread(void *p) { + (void)p; + Test(false); + return 0; +} + +int main() { + pthread_t th; + pthread_create(&th, 0, Thread, 0); + Test(true); + pthread_join(th, 0); + printf("OK\n"); +} + +// CHECK-NOT: WARNING: ThreadSanitizer: +// CHECK: OK diff --git a/lib/tsan/lit_tests/unaligned_race.cc b/lib/tsan/lit_tests/unaligned_race.cc new file mode 100644 index 000000000000..18bed8555cc5 --- /dev/null +++ b/lib/tsan/lit_tests/unaligned_race.cc @@ -0,0 +1,135 @@ +// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <unistd.h> + +uint64_t objs[8*2*(2 + 4 + 8)][2]; + +extern "C" { +uint16_t __sanitizer_unaligned_load16(void *addr); +uint32_t __sanitizer_unaligned_load32(void *addr); +uint64_t __sanitizer_unaligned_load64(void *addr); +void __sanitizer_unaligned_store16(void *addr, uint16_t v); +void __sanitizer_unaligned_store32(void *addr, uint32_t v); +void __sanitizer_unaligned_store64(void *addr, uint64_t v); +} + +// All this mess is to generate unique stack for each race, +// otherwise tsan will suppress similar stacks. 
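A minimal sketch of the idea behind the template machinery below (illustrative only, not part of this commit; touch is a made-up name): each instantiation of a function template with a different non-type parameter is a distinct function with its own program counter, so races reported through different instantiations have different stacks and are not collapsed into one report.

    #include <cstdio>

    template <int N>
    void touch(volatile int *p) {
      *p = N;  // distinct PC for every N
    }

    int main() {
      volatile int x = 0;
      touch<0>(&x);   // would appear as touch<0> in a report stack
      touch<1>(&x);   // would appear as touch<1> in a report stack
      std::printf("%d\n", x);
    }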
+ +static void access(char *p, int sz, int rw) { + if (rw) { + switch (sz) { + case 0: __sanitizer_unaligned_store16(p, 0); break; + case 1: __sanitizer_unaligned_store32(p, 0); break; + case 2: __sanitizer_unaligned_store64(p, 0); break; + default: exit(1); + } + } else { + switch (sz) { + case 0: __sanitizer_unaligned_load16(p); break; + case 1: __sanitizer_unaligned_load32(p); break; + case 2: __sanitizer_unaligned_load64(p); break; + default: exit(1); + } + } +} + +static int accesssize(int sz) { + switch (sz) { + case 0: return 2; + case 1: return 4; + case 2: return 8; + } + exit(1); +} + +template<int off, int off2> +static void access3(bool main, int sz1, bool rw, char *p) { + p += off; + if (main) { + access(p, sz1, true); + } else { + p += off2; + if (rw) { + *p = 42; + } else { + if (*p == 42) + printf("bingo!\n"); + } + } +} + +template<int off> +static void access2(bool main, int sz1, int off2, bool rw, char *obj) { + if (off2 == 0) + access3<off, 0>(main, sz1, rw, obj); + else if (off2 == 1) + access3<off, 1>(main, sz1, rw, obj); + else if (off2 == 2) + access3<off, 2>(main, sz1, rw, obj); + else if (off2 == 3) + access3<off, 3>(main, sz1, rw, obj); + else if (off2 == 4) + access3<off, 4>(main, sz1, rw, obj); + else if (off2 == 5) + access3<off, 5>(main, sz1, rw, obj); + else if (off2 == 6) + access3<off, 6>(main, sz1, rw, obj); + else if (off2 == 7) + access3<off, 7>(main, sz1, rw, obj); +} + +static void access1(bool main, int off, int sz1, int off2, bool rw, char *obj) { + if (off == 0) + access2<0>(main, sz1, off2, rw, obj); + else if (off == 1) + access2<1>(main, sz1, off2, rw, obj); + else if (off == 2) + access2<2>(main, sz1, off2, rw, obj); + else if (off == 3) + access2<3>(main, sz1, off2, rw, obj); + else if (off == 4) + access2<4>(main, sz1, off2, rw, obj); + else if (off == 5) + access2<5>(main, sz1, off2, rw, obj); + else if (off == 6) + access2<6>(main, sz1, off2, rw, obj); + else if (off == 7) + access2<7>(main, sz1, off2, rw, obj); +} + +void Test(bool main) { + uint64_t *obj = objs[0]; + for (int off = 0; off < 8; off++) { + for (int sz1 = 0; sz1 < 3; sz1++) { + for (int off2 = 0; off2 < accesssize(sz1); off2++) { + for (int rw = 0; rw < 2; rw++) { + // printf("thr=%d off=%d sz1=%d off2=%d rw=%d p=%p\n", + // main, off, sz1, off2, rw, obj); + access1(main, off, sz1, off2, rw, (char*)obj); + obj += 2; + } + } + } + } +} + +void *Thread(void *p) { + (void)p; + sleep(1); + Test(false); + return 0; +} + +int main() { + pthread_t th; + pthread_create(&th, 0, Thread, 0); + Test(true); + pthread_join(th, 0); +} + +// CHECK: WARNING: ThreadSanitizer: data race +// CHECK: ThreadSanitizer: reported 224 warnings diff --git a/lib/tsan/lit_tests/vptr_harmful_race.cc b/lib/tsan/lit_tests/vptr_harmful_race.cc index f51ba7ee57f0..76d31c00ad4f 100644 --- a/lib/tsan/lit_tests/vptr_harmful_race.cc +++ b/lib/tsan/lit_tests/vptr_harmful_race.cc @@ -2,6 +2,7 @@ #include <pthread.h> #include <semaphore.h> #include <stdio.h> +#include <unistd.h> struct A { A() { @@ -34,6 +35,7 @@ void *Thread1(void *x) { } void *Thread2(void *x) { + sleep(1); delete obj; return NULL; } @@ -46,4 +48,4 @@ int main() { pthread_join(t[1], NULL); } -// CHECK: WARNING: ThreadSanitizer: data race +// CHECK: WARNING: ThreadSanitizer: data race on vptr diff --git a/lib/tsan/lit_tests/vptr_harmful_race2.cc b/lib/tsan/lit_tests/vptr_harmful_race2.cc new file mode 100644 index 000000000000..d7e1d19a11bd --- /dev/null +++ b/lib/tsan/lit_tests/vptr_harmful_race2.cc @@ -0,0 +1,51 @@ +// RUN: %clangxx_tsan -O1 %s 
-o %t && %t 2>&1 | FileCheck %s +#include <pthread.h> +#include <semaphore.h> +#include <stdio.h> +#include <unistd.h> + +struct A { + A() { + sem_init(&sem_, 0, 0); + } + virtual void F() { + } + void Done() { + sem_post(&sem_); + } + virtual ~A() { + sem_wait(&sem_); + sem_destroy(&sem_); + } + sem_t sem_; +}; + +struct B : A { + virtual void F() { + } + virtual ~B() { } +}; + +static A *obj = new B; + +void *Thread1(void *x) { + sleep(1); + obj->F(); + obj->Done(); + return NULL; +} + +void *Thread2(void *x) { + delete obj; + return NULL; +} + +int main() { + pthread_t t[2]; + pthread_create(&t[0], NULL, Thread1, NULL); + pthread_create(&t[1], NULL, Thread2, NULL); + pthread_join(t[0], NULL); + pthread_join(t[1], NULL); +} + +// CHECK: WARNING: ThreadSanitizer: data race on vptr diff --git a/lib/tsan/rtl/CMakeLists.txt b/lib/tsan/rtl/CMakeLists.txt index d91e2e43ca4c..f1a8ff4d6558 100644 --- a/lib/tsan/rtl/CMakeLists.txt +++ b/lib/tsan/rtl/CMakeLists.txt @@ -37,22 +37,15 @@ if(CAN_TARGET_x86_64 AND UNIX AND NOT APPLE) set(TSAN_ASM_SOURCES tsan_rtl_amd64.S) # Pass ASM file directly to the C++ compiler. set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES - LANGUAGE C - ) - add_library(clang_rt.tsan-x86_64 STATIC - ${TSAN_SOURCES} - ${TSAN_ASM_SOURCES} - $<TARGET_OBJECTS:RTInterception.x86_64> - $<TARGET_OBJECTS:RTSanitizerCommon.x86_64> - ) - set_target_compile_flags(clang_rt.tsan-x86_64 - ${TSAN_CFLAGS} ${TARGET_x86_64_CFLAGS} - ) - list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-x86_64) -endif() - -if(TSAN_RUNTIME_LIBRARIES) - set_property(TARGET ${TSAN_RUNTIME_LIBRARIES} APPEND PROPERTY - COMPILE_DEFINITIONS ${TSAN_COMMON_DEFINITIONS}) - add_clang_compiler_rt_libraries(${TSAN_RUNTIME_LIBRARIES}) + LANGUAGE C) + set(arch "x86_64") + add_compiler_rt_static_runtime(clang_rt.tsan-${arch} ${arch} + SOURCES ${TSAN_SOURCES} ${TSAN_ASM_SOURCES} + $<TARGET_OBJECTS:RTInterception.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + CFLAGS ${TSAN_CFLAGS} + DEFS ${TSAN_COMMON_DEFINITIONS} + SYMS tsan.syms) + list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-${arch}) endif() diff --git a/lib/tsan/rtl/Makefile.mk b/lib/tsan/rtl/Makefile.mk index a6a7fc8b86e8..2687123f731d 100644 --- a/lib/tsan/rtl/Makefile.mk +++ b/lib/tsan/rtl/Makefile.mk @@ -19,7 +19,7 @@ Implementation := Generic # FIXME: use automatic dependencies? Dependencies := $(wildcard $(Dir)/*.h) Dependencies += $(wildcard $(Dir)/../../interception/*.h) -Dependencies += $(wildcard $(Dir)/../../interception/mach_override/*.h) +Dependencies += $(wildcard $(Dir)/../../sanitizer_common/*.h) # Define a convenience variable for all the tsan functions. TsanFunctions += $(Sources:%.cc=%) $(AsmSources:%.S=%) diff --git a/lib/tsan/rtl/Makefile.old b/lib/tsan/rtl/Makefile.old index f522ec6b47d7..33944ffe9675 100644 --- a/lib/tsan/rtl/Makefile.old +++ b/lib/tsan/rtl/Makefile.old @@ -1,16 +1,14 @@ CXXFLAGS = -fPIE -g -Wall -Werror -fno-builtin -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG) +CLANG=clang ifeq ($(DEBUG), 0) CXXFLAGS += -O3 endif -ifeq ($(CXX), clang++) - CXXFLAGS+= -Wgnu -endif # For interception. FIXME: move interception one level higher. INTERCEPTION=../../interception COMMON=../../sanitizer_common INCLUDES= -I../.. -I../../../include -EXTRA_CXXFLAGS=-fno-exceptions +EXTRA_CXXFLAGS=-fno-exceptions -fno-rtti NO_SYSROOT=--sysroot=. 
CXXFLAGS+=$(EXTRA_CXXFLAGS) CXXFLAGS+=$(CFLAGS) @@ -21,7 +19,7 @@ ifeq ($(CXX), g++) endif # CXX=g++ endif # DEBUG=0 -ifeq ($(CXX), clang++) +ifeq ($(CXX), $(CLANG)++) # Global constructors are banned. CXXFLAGS+=-Wglobal-constructors endif diff --git a/lib/tsan/rtl/tsan.syms b/lib/tsan/rtl/tsan.syms new file mode 100644 index 000000000000..4464a0a231c9 --- /dev/null +++ b/lib/tsan/rtl/tsan.syms @@ -0,0 +1,5 @@ +{ + __tsan_*; + __sanitizer_syscall_pre_*; + __sanitizer_syscall_post_*; +}; diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h index e0c04733f0a3..7150e2e255d8 100644 --- a/lib/tsan/rtl/tsan_defs.h +++ b/lib/tsan/rtl/tsan_defs.h @@ -28,16 +28,19 @@ namespace __tsan { const bool kGoMode = true; const bool kCppMode = false; const char *const kTsanOptionsEnv = "GORACE"; +// Go linker does not support weak symbols. +#define CPP_WEAK #else const bool kGoMode = false; const bool kCppMode = true; const char *const kTsanOptionsEnv = "TSAN_OPTIONS"; +#define CPP_WEAK WEAK #endif const int kTidBits = 13; const unsigned kMaxTid = 1 << kTidBits; const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit. -const int kClkBits = 43; +const int kClkBits = 42; #ifndef TSAN_GO const int kShadowStackSize = 4 * 1024; const int kTraceStackSize = 256; @@ -153,13 +156,13 @@ struct MD5Hash { MD5Hash md5_hash(const void *data, uptr size); struct ThreadState; -struct ThreadContext; struct Context; struct ReportStack; class ReportDesc; class RegionAlloc; class StackTrace; struct MBlock; +struct Suppression; } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_fd.cc b/lib/tsan/rtl/tsan_fd.cc index ef375a4d98f6..14bdbb53b322 100644 --- a/lib/tsan/rtl/tsan_fd.cc +++ b/lib/tsan/rtl/tsan_fd.cc @@ -74,13 +74,14 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) { uptr l1 = atomic_load(pl1, memory_order_consume); if (l1 == 0) { uptr size = kTableSizeL2 * sizeof(FdDesc); - void *p = internal_alloc(MBlockFD, size); + // We need this to reside in user memory to properly catch races on it. + void *p = user_alloc(thr, pc, size); internal_memset(p, 0, size); MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size); if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel)) l1 = (uptr)p; else - internal_free(p); + user_free(thr, pc, p); } return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT } @@ -150,7 +151,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) { FdDesc *d = fddesc(thr, pc, fd); FdSync *s = d->sync; DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s); - MemoryRead8Byte(thr, pc, (uptr)d); + MemoryRead(thr, pc, (uptr)d, kSizeLog8); if (s) Acquire(thr, pc, (uptr)s); } @@ -161,20 +162,20 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) { DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s); if (s) Release(thr, pc, (uptr)s); - MemoryRead8Byte(thr, pc, (uptr)d); + MemoryRead(thr, pc, (uptr)d, kSizeLog8); } void FdAccess(ThreadState *thr, uptr pc, int fd) { DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd); FdDesc *d = fddesc(thr, pc, fd); - MemoryRead8Byte(thr, pc, (uptr)d); + MemoryRead(thr, pc, (uptr)d, kSizeLog8); } void FdClose(ThreadState *thr, uptr pc, int fd) { DPrintf("#%d: FdClose(%d)\n", thr->tid, fd); FdDesc *d = fddesc(thr, pc, fd); // To catch races between fd usage and close. - MemoryWrite8Byte(thr, pc, (uptr)d); + MemoryWrite(thr, pc, (uptr)d, kSizeLog8); // We need to clear it, because if we do not intercept any call out there // that creates fd, we will hit false postives. 
MemoryResetRange(thr, pc, (uptr)d, 8); @@ -193,7 +194,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) { DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd); // Ignore the case when user dups not yet connected socket. FdDesc *od = fddesc(thr, pc, oldfd); - MemoryRead8Byte(thr, pc, (uptr)od); + MemoryRead(thr, pc, (uptr)od, kSizeLog8); FdClose(thr, pc, newfd); init(thr, pc, newfd, ref(od->sync)); } diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc index 88c4bb6a2e44..c062592f482d 100644 --- a/lib/tsan/rtl/tsan_flags.cc +++ b/lib/tsan/rtl/tsan_flags.cc @@ -45,15 +45,19 @@ void InitializeFlags(Flags *f, const char *env) { f->report_thread_leaks = true; f->report_destroy_locked = true; f->report_signal_unsafe = true; + f->report_atomic_races = true; f->force_seq_cst_atomics = false; f->strip_path_prefix = ""; f->suppressions = ""; + f->print_suppressions = false; + f->print_benign = false; f->exitcode = 66; f->log_path = "stderr"; f->atexit_sleep_ms = 1000; f->verbosity = 0; f->profile_memory = ""; f->flush_memory_ms = 0; + f->flush_symbolizer_ms = 5000; f->stop_on_start = false; f->running_on_valgrind = false; f->external_symbolizer_path = ""; @@ -72,15 +76,19 @@ void InitializeFlags(Flags *f, const char *env) { ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks"); ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked"); ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe"); + ParseFlag(env, &f->report_atomic_races, "report_atomic_races"); ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics"); ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix"); ParseFlag(env, &f->suppressions, "suppressions"); + ParseFlag(env, &f->print_suppressions, "print_suppressions"); + ParseFlag(env, &f->print_benign, "print_benign"); ParseFlag(env, &f->exitcode, "exitcode"); ParseFlag(env, &f->log_path, "log_path"); ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms"); ParseFlag(env, &f->verbosity, "verbosity"); ParseFlag(env, &f->profile_memory, "profile_memory"); ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms"); + ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms"); ParseFlag(env, &f->stop_on_start, "stop_on_start"); ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path"); ParseFlag(env, &f->history_size, "history_size"); diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h index 6547911ec7a3..aaacd98a6223 100644 --- a/lib/tsan/rtl/tsan_flags.h +++ b/lib/tsan/rtl/tsan_flags.h @@ -43,6 +43,8 @@ struct Flags { // Report violations of async signal-safety // (e.g. malloc() call from a signal handler). bool report_signal_unsafe; + // Report races between atomic and plain memory accesses. + bool report_atomic_races; // If set, all atomics are effectively sequentially consistent (seq_cst), // regardless of what user actually specified. bool force_seq_cst_atomics; @@ -50,6 +52,10 @@ struct Flags { const char *strip_path_prefix; // Suppressions filename. const char *suppressions; + // Print matched suppressions at exit. + bool print_suppressions; + // Print matched "benign" races at exit. + bool print_benign; // Override exit status if something was reported. int exitcode; // Write logs to "log_path.pid". @@ -65,6 +71,8 @@ struct Flags { const char *profile_memory; // Flush shadow memory every X ms. int flush_memory_ms; + // Flush symbolizer caches every X ms. + int flush_symbolizer_ms; // Stops on start until __tsan_resume() is called (for debugging). 
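The new flags above (report_atomic_races, print_suppressions, print_benign, flush_symbolizer_ms) are read from the TSAN_OPTIONS environment string by ParseFlag(). A simplified standalone sketch of that kind of name=value lookup, not the runtime's actual parser and without its exact-name matching:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Looks for "name=0" or "name=1" in an options string such as
// "report_atomic_races=0:flush_symbolizer_ms=1000" and updates *flag.
static void ParseBoolFlag(const char *env, bool *flag, const char *name) {
  if (env == nullptr) return;
  const char *p = strstr(env, name);       // naive substring match
  if (p == nullptr) return;
  p += strlen(name);
  if (*p != '=') return;
  *flag = (p[1] == '1');
}

int main() {
  bool report_atomic_races = true;         // default set in InitializeFlags()
  ParseBoolFlag(getenv("TSAN_OPTIONS"), &report_atomic_races,
                "report_atomic_races");
  printf("report_atomic_races=%d\n", report_atomic_races);
}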
bool stop_on_start; // Controls whether RunningOnValgrind() returns true or false. @@ -86,6 +94,6 @@ struct Flags { Flags *flags(); void InitializeFlags(Flags *flags, const char *env); -} +} // namespace __tsan #endif // TSAN_FLAGS_H diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc index be58ca92cf91..f18b26f6abe4 100644 --- a/lib/tsan/rtl/tsan_interceptors.cc +++ b/lib/tsan/rtl/tsan_interceptors.cc @@ -10,11 +10,13 @@ // This file is a part of ThreadSanitizer (TSan), a race detector. // // FIXME: move as many interceptors as possible into -// sanitizer_common/sanitizer_common_interceptors.h +// sanitizer_common/sanitizer_common_interceptors.inc //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "interception/interception.h" @@ -26,18 +28,21 @@ using namespace __tsan; // NOLINT -const int kSigCount = 128; +const int kSigCount = 64; struct my_siginfo_t { - int opaque[128]; + // The size is determined by looking at sizeof of real siginfo_t on linux. + u64 opaque[128 / sizeof(u64)]; }; struct sigset_t { - u64 val[1024 / 8 / sizeof(u64)]; + // The size is determined by looking at sizeof of real sigset_t on linux. + u64 val[128 / sizeof(u64)]; }; struct ucontext_t { - uptr opaque[117]; + // The size is determined by looking at sizeof of real ucontext_t on linux. + u64 opaque[936 / sizeof(u64) + 1]; }; extern "C" int pthread_attr_init(void *attr); @@ -53,9 +58,13 @@ extern "C" int pthread_sigmask(int how, const sigset_t *set, sigset_t *oldset); extern "C" int sigfillset(sigset_t *set); extern "C" void *pthread_self(); extern "C" void _exit(int status); -extern "C" int __cxa_atexit(void (*func)(void *arg), void *arg, void *dso); extern "C" int *__errno_location(); extern "C" int fileno_unlocked(void *stream); +extern "C" void *__libc_malloc(uptr size); +extern "C" void *__libc_calloc(uptr size, uptr n); +extern "C" void *__libc_realloc(void *ptr, uptr size); +extern "C" void __libc_free(void *ptr); +extern "C" int mallopt(int param, int value); const int PTHREAD_MUTEX_RECURSIVE = 1; const int PTHREAD_MUTEX_RECURSIVE_NP = 1; const int kPthreadAttrSize = 56; @@ -83,11 +92,6 @@ typedef void (*sighandler_t)(int sig); #define errno (*__errno_location()) -union pthread_attr_t { - char size[kPthreadAttrSize]; - void *align; -}; - struct sigaction_t { union { sighandler_t sa_handler; @@ -124,7 +128,7 @@ struct SignalContext { int pending_signal_count; SignalDesc pending_signals[kSigCount]; }; -} +} // namespace __tsan static SignalContext *SigCtx(ThreadState *thr) { SignalContext *ctx = (SignalContext*)thr->signal_ctx; @@ -240,12 +244,15 @@ class AtExitContext { typedef void(*atexit_t)(); - int atexit(ThreadState *thr, uptr pc, atexit_t f) { + int atexit(ThreadState *thr, uptr pc, bool is_on_exit, + atexit_t f, void *arg) { Lock l(&mtx_); if (pos_ == kMaxAtExit) return 1; Release(thr, pc, (uptr)this); stack_[pos_] = f; + args_[pos_] = arg; + is_on_exits_[pos_] = is_on_exit; pos_++; return 0; } @@ -254,11 +261,15 @@ class AtExitContext { CHECK_EQ(thr->in_rtl, 0); for (;;) { atexit_t f = 0; + void *arg = 0; + bool is_on_exit = false; { Lock l(&mtx_); if (pos_) { pos_--; f = stack_[pos_]; + arg = args_[pos_]; + is_on_exit = 
is_on_exits_[pos_]; ScopedInRtl in_rtl; Acquire(thr, pc, (uptr)this); } @@ -267,7 +278,10 @@ class AtExitContext { break; DPrintf("#%d: executing atexit func %p\n", thr->tid, f); CHECK_EQ(thr->in_rtl, 0); - f(); + if (is_on_exit) + ((void(*)(int status, void *arg))f)(0, arg); + else + ((void(*)(void *arg, void *dso))f)(arg, 0); } } @@ -275,42 +289,133 @@ class AtExitContext { static const int kMaxAtExit = 128; Mutex mtx_; atexit_t stack_[kMaxAtExit]; + void *args_[kMaxAtExit]; + bool is_on_exits_[kMaxAtExit]; int pos_; }; static AtExitContext *atexit_ctx; -static void finalize(void *arg) { - ThreadState * thr = cur_thread(); - uptr pc = 0; - atexit_ctx->exit(thr, pc); - { - ScopedInRtl in_rtl; - DestroyAndFree(atexit_ctx); +TSAN_INTERCEPTOR(int, atexit, void (*f)()) { + if (cur_thread()->in_symbolizer) + return 0; + SCOPED_TSAN_INTERCEPTOR(atexit, f); + return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0); +} + +TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) { + if (cur_thread()->in_symbolizer) + return 0; + SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg); + return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg); +} + +TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) { + if (cur_thread()->in_symbolizer) + return 0; + SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso); + if (dso) + return REAL(__cxa_atexit)(f, arg, dso); + return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg); +} + +// Cleanup old bufs. +static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) { + for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) { + JmpBuf *buf = &thr->jmp_bufs[i]; + if (buf->sp <= sp) { + uptr sz = thr->jmp_bufs.Size(); + thr->jmp_bufs[i] = thr->jmp_bufs[sz - 1]; + thr->jmp_bufs.PopBack(); + i--; + } } - int status = Finalize(cur_thread()); - if (status) - _exit(status); } -TSAN_INTERCEPTOR(int, atexit, void (*f)()) { - SCOPED_TSAN_INTERCEPTOR(atexit, f); - return atexit_ctx->atexit(thr, pc, f); +static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) { + if (thr->shadow_stack_pos == 0) // called from libc guts during bootstrap + return; + // Cleanup old bufs. + JmpBufGarbageCollect(thr, sp); + // Remember the buf. + JmpBuf *buf = thr->jmp_bufs.PushBack(); + buf->sp = sp; + buf->mangled_sp = mangled_sp; + buf->shadow_stack_pos = thr->shadow_stack_pos; +} + +static void LongJmp(ThreadState *thr, uptr *env) { + uptr mangled_sp = env[6]; + // Find the saved buf by mangled_sp. + for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) { + JmpBuf *buf = &thr->jmp_bufs[i]; + if (buf->mangled_sp == mangled_sp) { + CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos); + // Unwind the stack. + while (thr->shadow_stack_pos > buf->shadow_stack_pos) + FuncExit(thr); + JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp + return; + } + } + Printf("ThreadSanitizer: can't find longjmp buf\n"); + CHECK(0); } -TSAN_INTERCEPTOR(void, longjmp, void *env, int val) { - SCOPED_TSAN_INTERCEPTOR(longjmp, env, val); - Printf("ThreadSanitizer: longjmp() is not supported\n"); - Die(); +extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) { + ScopedInRtl in_rtl; + SetJmp(cur_thread(), sp, mangled_sp); } -TSAN_INTERCEPTOR(void, siglongjmp, void *env, int val) { - SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val); - Printf("ThreadSanitizer: siglongjmp() is not supported\n"); - Die(); +// Not called. Merely to satisfy TSAN_INTERCEPT(). 
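The SetJmp()/LongJmp() pair above keeps the instrumented shadow call stack consistent across non-local jumps: setjmp records the current shadow-stack depth, and the matching longjmp pops FuncExit() until that depth is restored. A standalone illustration of the same bookkeeping idea, using plain C++ setjmp/longjmp and hypothetical names (it is not the real interceptor, which looks buffers up by the mangled stack pointer):

#include <csetjmp>
#include <cstdio>
#include <vector>

static std::vector<const char*> shadow_stack;   // simulated shadow call stack

struct JmpRecord { std::jmp_buf env; size_t depth; };
static JmpRecord checkpoint;

static void Leaf() {
  shadow_stack.push_back("Leaf");
  std::longjmp(checkpoint.env, 1);              // jumps out of two frames
}

static void Middle() {
  shadow_stack.push_back("Middle");
  Leaf();
}

int main() {
  shadow_stack.push_back("main");
  checkpoint.depth = shadow_stack.size();        // like buf->shadow_stack_pos
  if (setjmp(checkpoint.env) == 0) {
    Middle();
  } else {
    // Unwind the shadow stack to the depth recorded at setjmp time,
    // mirroring what LongJmp() does with FuncExit().
    while (shadow_stack.size() > checkpoint.depth)
      shadow_stack.pop_back();
  }
  printf("frames after longjmp: %zu\n", shadow_stack.size());  // prints 1
}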
+extern "C" int __interceptor_setjmp(void *env) { + CHECK(0); + return 0; +} + +extern "C" int __interceptor__setjmp(void *env) { + CHECK(0); + return 0; +} + +extern "C" int __interceptor_sigsetjmp(void *env) { + CHECK(0); + return 0; +} + +extern "C" int __interceptor___sigsetjmp(void *env) { + CHECK(0); + return 0; +} + +extern "C" int setjmp(void *env); +extern "C" int _setjmp(void *env); +extern "C" int sigsetjmp(void *env); +extern "C" int __sigsetjmp(void *env); +DEFINE_REAL(int, setjmp, void *env) +DEFINE_REAL(int, _setjmp, void *env) +DEFINE_REAL(int, sigsetjmp, void *env) +DEFINE_REAL(int, __sigsetjmp, void *env) + +TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) { + { + SCOPED_TSAN_INTERCEPTOR(longjmp, env, val); + } + LongJmp(cur_thread(), env); + REAL(longjmp)(env, val); +} + +TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) { + { + SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val); + } + LongJmp(cur_thread(), env); + REAL(siglongjmp)(env, val); } TSAN_INTERCEPTOR(void*, malloc, uptr size) { + if (cur_thread()->in_symbolizer) + return __libc_malloc(size); void *p = 0; { SCOPED_INTERCEPTOR_RAW(malloc, size); @@ -326,17 +431,23 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) { } TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) { + if (cur_thread()->in_symbolizer) + return __libc_calloc(size, n); + if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n)) return 0; void *p = 0; { SCOPED_INTERCEPTOR_RAW(calloc, size, n); p = user_alloc(thr, pc, n * size); - if (p) internal_memset(p, 0, n * size); + if (p) + internal_memset(p, 0, n * size); } invoke_malloc_hook(p, n * size); return p; } TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) { + if (cur_thread()->in_symbolizer) + return __libc_realloc(p, size); if (p) invoke_free_hook(p); { @@ -350,6 +461,8 @@ TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) { TSAN_INTERCEPTOR(void, free, void *p) { if (p == 0) return; + if (cur_thread()->in_symbolizer) + return __libc_free(p); invoke_free_hook(p); SCOPED_INTERCEPTOR_RAW(free, p); user_free(thr, pc, p); @@ -358,12 +471,21 @@ TSAN_INTERCEPTOR(void, free, void *p) { TSAN_INTERCEPTOR(void, cfree, void *p) { if (p == 0) return; + if (cur_thread()->in_symbolizer) + return __libc_free(p); invoke_free_hook(p); SCOPED_INTERCEPTOR_RAW(cfree, p); user_free(thr, pc, p); } +TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) { + SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p); + return user_alloc_usable_size(thr, pc, p); +} + #define OPERATOR_NEW_BODY(mangled_name) \ + if (cur_thread()->in_symbolizer) \ + return __libc_malloc(size); \ void *p = 0; \ { \ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \ @@ -387,6 +509,8 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) { #define OPERATOR_DELETE_BODY(mangled_name) \ if (ptr == 0) return; \ + if (cur_thread()->in_symbolizer) \ + return __libc_free(ptr); \ invoke_free_hook(ptr); \ SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \ user_free(thr, pc, ptr); @@ -551,7 +675,9 @@ TSAN_INTERCEPTOR(void*, mmap, void *addr, long_t sz, int prot, return MAP_FAILED; void *res = REAL(mmap)(addr, sz, prot, flags, fd, off); if (res != MAP_FAILED) { - MemoryResetRange(thr, pc, (uptr)res, sz); + if (fd > 0) + FdAccess(thr, pc, fd); + MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); } return res; } @@ -563,13 +689,16 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot, return MAP_FAILED; void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off); if (res != MAP_FAILED) { - MemoryResetRange(thr, pc, (uptr)res, 
sz); + if (fd > 0) + FdAccess(thr, pc, fd); + MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); } return res; } TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) { SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz); + DontNeedShadowFor((uptr)addr, sz); int res = REAL(munmap)(addr, sz); return res; } @@ -681,21 +810,21 @@ extern "C" void *__tsan_thread_start_func(void *arg) { TSAN_INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*), void * param) { SCOPED_TSAN_INTERCEPTOR(pthread_create, th, attr, callback, param); - pthread_attr_t myattr; + __sanitizer_pthread_attr_t myattr; if (attr == 0) { pthread_attr_init(&myattr); attr = &myattr; } int detached = 0; pthread_attr_getdetachstate(attr, &detached); - uptr stacksize = 0; - pthread_attr_getstacksize(attr, &stacksize); - // We place the huge ThreadState object into TLS, account for that. - const uptr minstacksize = GetTlsSize() + 128*1024; - if (stacksize < minstacksize) { - DPrintf("ThreadSanitizer: stacksize %zu->%zu\n", stacksize, minstacksize); - pthread_attr_setstacksize(attr, minstacksize); - } + +#if defined(TSAN_DEBUG_OUTPUT) + int verbosity = (TSAN_DEBUG_OUTPUT); +#else + int verbosity = 0; +#endif + AdjustStackSizeLinux(attr, verbosity); + ThreadParam p; p.callback = callback; p.param = param; @@ -960,14 +1089,14 @@ TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) { TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count); - MemoryWrite1Byte(thr, pc, (uptr)b); + MemoryWrite(thr, pc, (uptr)b, kSizeLog1); int res = REAL(pthread_barrier_init)(b, a, count); return res; } TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b); - MemoryWrite1Byte(thr, pc, (uptr)b); + MemoryWrite(thr, pc, (uptr)b, kSizeLog1); int res = REAL(pthread_barrier_destroy)(b); return res; } @@ -975,9 +1104,9 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) { TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) { SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b); Release(thr, pc, (uptr)b); - MemoryRead1Byte(thr, pc, (uptr)b); + MemoryRead(thr, pc, (uptr)b, kSizeLog1); int res = REAL(pthread_barrier_wait)(b); - MemoryRead1Byte(thr, pc, (uptr)b); + MemoryRead(thr, pc, (uptr)b, kSizeLog1); if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) { Acquire(thr, pc, (uptr)b); } @@ -1064,6 +1193,74 @@ TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) { return res; } +TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf); + return REAL(__xstat)(version, path, buf); +} + +TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf); + return REAL(__xstat)(0, path, buf); +} + +TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf); + return REAL(__xstat64)(version, path, buf); +} + +TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf); + return REAL(__xstat64)(0, path, buf); +} + +TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf); + return REAL(__lxstat)(version, path, buf); +} + +TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf); + return REAL(__lxstat)(0, path, buf); +} + 
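The pthread_create interceptor above used to enlarge the requested stack by hand (TLS size plus 128 KB of slack, to make room for the large ThreadState object); that logic is now delegated to AdjustStackSizeLinux() in sanitizer_common. A standalone sketch of the same adjustment using the POSIX attribute API (the sizes are illustrative):

#include <pthread.h>

static void EnsureMinStack(pthread_attr_t *attr, size_t min_size) {
  size_t cur = 0;
  pthread_attr_getstacksize(attr, &cur);
  if (cur < min_size)
    pthread_attr_setstacksize(attr, min_size);   // bump only if too small
}

static void *Worker(void *) { return nullptr; }

int main() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  EnsureMinStack(&attr, 640 * 1024);             // e.g. TLS size + 128 KB slack
  pthread_t t;
  pthread_create(&t, &attr, Worker, nullptr);
  pthread_join(t, nullptr);
  pthread_attr_destroy(&attr);
}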
+TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf); + return REAL(__lxstat64)(version, path, buf); +} + +TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf); + return REAL(__lxstat64)(0, path, buf); +} + +TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat)(version, fd, buf); +} + +TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat)(0, fd, buf); +} + +TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat64)(version, fd, buf); +} + +TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) { + SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf); + if (fd > 0) + FdAccess(thr, pc, fd); + return REAL(__fxstat64)(0, fd, buf); +} + TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) { SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode); int fd = REAL(open)(name, flags, mode); @@ -1179,6 +1376,22 @@ TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) { return res; } +TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) { + SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen); + int res = REAL(bind)(fd, addr, addrlen); + if (fd > 0 && res == 0) + FdAccess(thr, pc, fd); + return res; +} + +TSAN_INTERCEPTOR(int, listen, int fd, int backlog) { + SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog); + int res = REAL(listen)(fd, backlog); + if (fd > 0 && res == 0) + FdAccess(thr, pc, fd); + return res; +} + TSAN_INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) { SCOPED_TSAN_INTERCEPTOR(accept, fd, addr, addrlen); int fd2 = REAL(accept)(fd, addr, addrlen); @@ -1225,6 +1438,18 @@ TSAN_INTERCEPTOR(int, __close, int fd) { return REAL(__close)(fd); } +// glibc guts +TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) { + SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr); + int fds[64]; + int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds)); + for (int i = 0; i < cnt; i++) { + if (fds[i] > 0) + FdClose(thr, pc, fds[i]); + } + REAL(__res_iclose)(state, free_addr); +} + TSAN_INTERCEPTOR(int, pipe, int *pipefd) { SCOPED_TSAN_INTERCEPTOR(pipe, pipefd); int res = REAL(pipe)(pipefd); @@ -1373,6 +1598,17 @@ TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) { return REAL(fwrite)(p, size, nmemb, f); } +TSAN_INTERCEPTOR(int, fflush, void *stream) { + SCOPED_TSAN_INTERCEPTOR(fflush, stream); + return REAL(fflush)(stream); +} + +TSAN_INTERCEPTOR(void, abort, int fake) { + SCOPED_TSAN_INTERCEPTOR(abort, fake); + REAL(fflush)(0); + REAL(abort)(fake); +} + TSAN_INTERCEPTOR(int, puts, const char *s) { SCOPED_TSAN_INTERCEPTOR(puts, s); MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false); @@ -1420,7 +1656,7 @@ TSAN_INTERCEPTOR(int, poll, void *fds, long_t nfds, int timeout) { return res; } -static void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, +void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, my_siginfo_t *info, void *ctx) { ThreadState *thr = cur_thread(); SignalContext *sctx = SigCtx(thr); @@ -1433,7 +1669,6 @@ static void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, // 
(but check if we are in a recursive interceptor, // i.e. pthread_join()->munmap()). (sctx && sctx->in_blocking_func == 1 && thr->in_rtl == 1)) { - CHECK(thr->in_rtl == 0 || thr->in_rtl == 1); int in_rtl = thr->in_rtl; thr->in_rtl = 0; CHECK_EQ(thr->in_signal_handler, false); @@ -1519,11 +1754,11 @@ TSAN_INTERCEPTOR(int, kill, int pid, int sig) { SignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; - if (pid == GetPid()) { + if (pid == (int)internal_getpid()) { sctx->int_signal_send = sig; } int res = REAL(kill)(pid, sig); - if (pid == GetPid()) { + if (pid == (int)internal_getpid()) { CHECK_EQ(sctx->int_signal_send, sig); sctx->int_signal_send = prev; } @@ -1600,6 +1835,13 @@ struct TsanInterceptorContext { const uptr pc; }; +#include "sanitizer_common/sanitizer_platform_interceptors.h" +// Causes interceptor recursion (getpwuid_r() calls fopen()) +#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS +#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS +// Causes interceptor recursion (glob64() calls lstat64()) +#undef SANITIZER_INTERCEPT_GLOB + #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ MemoryAccessRange(((TsanInterceptorContext*)ctx)->thr, \ ((TsanInterceptorContext*)ctx)->pc, \ @@ -1621,6 +1863,13 @@ struct TsanInterceptorContext { ThreadSetName(((TsanInterceptorContext*)ctx)->thr, name) #include "sanitizer_common/sanitizer_common_interceptors.inc" +// FIXME: Implement these with MemoryAccessRange(). +#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) +#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) +#define COMMON_SYSCALL_POST_READ_RANGE(p, s) +#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) +#include "sanitizer_common/sanitizer_common_syscalls.inc" + namespace __tsan { void ProcessPendingSignals(ThreadState *thr) { @@ -1655,7 +1904,7 @@ void ProcessPendingSignals(ThreadState *thr) { (uptr)sigactions[sig].sa_sigaction : (uptr)sigactions[sig].sa_handler; stack.Init(&pc, 1); - Lock l(&ctx->thread_mtx); + ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeErrnoInSignal); if (!IsFiredSuppression(ctx, rep, stack)) { rep.AddStack(&stack); @@ -1671,6 +1920,16 @@ void ProcessPendingSignals(ThreadState *thr) { thr->in_signal_handler = false; } +static void finalize(void *arg) { + ThreadState * thr = cur_thread(); + uptr pc = 0; + atexit_ctx->exit(thr, pc); + int status = Finalize(cur_thread()); + REAL(fflush)(0); + if (status) + _exit(status); +} + static void unreachable() { Printf("FATAL: ThreadSanitizer: unreachable called\n"); Die(); @@ -1684,8 +1943,16 @@ void InitializeInterceptors() { REAL(memcpy) = internal_memcpy; REAL(memcmp) = internal_memcmp; + // Instruct libc malloc to consume less memory. 
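The mallopt(1, 0) and mallopt(-3, 32*1024) calls just below use raw parameter numbers because the runtime avoids libc headers; on glibc those numbers correspond to named constants, as in this equivalent standalone sketch (glibc-specific):

#include <malloc.h>

int main() {
  mallopt(M_MXFAST, 0);                  // parameter 1: disable fastbins
  mallopt(M_MMAP_THRESHOLD, 32 * 1024);  // parameter -3: mmap blocks >= 32 KB
  return 0;
}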
+ mallopt(1, 0); // M_MXFAST + mallopt(-3, 32*1024); // M_MMAP_THRESHOLD + SANITIZER_COMMON_INTERCEPTORS_INIT; + TSAN_INTERCEPT(setjmp); + TSAN_INTERCEPT(_setjmp); + TSAN_INTERCEPT(sigsetjmp); + TSAN_INTERCEPT(__sigsetjmp); TSAN_INTERCEPT(longjmp); TSAN_INTERCEPT(siglongjmp); @@ -1767,6 +2034,18 @@ void InitializeInterceptors() { TSAN_INTERCEPT(sem_post); TSAN_INTERCEPT(sem_getvalue); + TSAN_INTERCEPT(stat); + TSAN_INTERCEPT(__xstat); + TSAN_INTERCEPT(stat64); + TSAN_INTERCEPT(__xstat64); + TSAN_INTERCEPT(lstat); + TSAN_INTERCEPT(__lxstat); + TSAN_INTERCEPT(lstat64); + TSAN_INTERCEPT(__lxstat64); + TSAN_INTERCEPT(fstat); + TSAN_INTERCEPT(__fxstat); + TSAN_INTERCEPT(fstat64); + TSAN_INTERCEPT(__fxstat64); TSAN_INTERCEPT(open); TSAN_INTERCEPT(open64); TSAN_INTERCEPT(creat); @@ -1781,11 +2060,15 @@ void InitializeInterceptors() { TSAN_INTERCEPT(socket); TSAN_INTERCEPT(socketpair); TSAN_INTERCEPT(connect); + TSAN_INTERCEPT(bind); + TSAN_INTERCEPT(listen); TSAN_INTERCEPT(accept); TSAN_INTERCEPT(accept4); TSAN_INTERCEPT(epoll_create); TSAN_INTERCEPT(epoll_create1); TSAN_INTERCEPT(close); + TSAN_INTERCEPT(__close); + TSAN_INTERCEPT(__res_iclose); TSAN_INTERCEPT(pipe); TSAN_INTERCEPT(pipe2); @@ -1804,6 +2087,8 @@ void InitializeInterceptors() { TSAN_INTERCEPT(fclose); TSAN_INTERCEPT(fread); TSAN_INTERCEPT(fwrite); + TSAN_INTERCEPT(fflush); + TSAN_INTERCEPT(abort); TSAN_INTERCEPT(puts); TSAN_INTERCEPT(rmdir); TSAN_INTERCEPT(opendir); @@ -1828,6 +2113,8 @@ void InitializeInterceptors() { TSAN_INTERCEPT(munlockall); TSAN_INTERCEPT(fork); + TSAN_INTERCEPT(on_exit); + TSAN_INTERCEPT(__cxa_atexit); // Need to setup it, because interceptors check that the function is resolved. // But atexit is emitted directly into the module, so can't be resolved. @@ -1835,7 +2122,7 @@ void InitializeInterceptors() { atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext))) AtExitContext(); - if (__cxa_atexit(&finalize, 0, 0)) { + if (REAL(__cxa_atexit)(&finalize, 0, 0)) { Printf("ThreadSanitizer: failed to setup atexit callback\n"); Die(); } diff --git a/lib/tsan/rtl/tsan_interface.cc b/lib/tsan/rtl/tsan_interface.cc index 6d0954602ff7..efad8c192d6e 100644 --- a/lib/tsan/rtl/tsan_interface.cc +++ b/lib/tsan/rtl/tsan_interface.cc @@ -14,23 +14,73 @@ #include "tsan_interface.h" #include "tsan_interface_ann.h" #include "tsan_rtl.h" +#include "sanitizer_common/sanitizer_internal_defs.h" #define CALLERPC ((uptr)__builtin_return_address(0)) using namespace __tsan; // NOLINT +typedef u16 uint16_t; +typedef u32 uint32_t; +typedef u64 uint64_t; + void __tsan_init() { Initialize(cur_thread()); } void __tsan_read16(void *addr) { - MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr); - MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr + 8); + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); + MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8); } void __tsan_write16(void *addr) { - MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr); - MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr + 8); + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8); +} + +u16 __tsan_unaligned_read2(void *addr) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false); + return *(u16*)addr; +} + +u32 __tsan_unaligned_read4(void *addr) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false); + return *(u32*)addr; +} + +u64 __tsan_unaligned_read8(void *addr) { + UnalignedMemoryAccess(cur_thread(), 
CALLERPC, (uptr)addr, 8, false, false); + return *(u64*)addr; +} + +void __tsan_unaligned_write2(void *addr, u16 v) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false); + *(u16*)addr = v; +} + +void __tsan_unaligned_write4(void *addr, u32 v) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false); + *(u32*)addr = v; +} + +void __tsan_unaligned_write8(void *addr, u64 v) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false); + *(u64*)addr = v; +} + +extern "C" { +uint16_t __sanitizer_unaligned_load16(void *addr) + ALIAS("__tsan_unaligned_read2") SANITIZER_INTERFACE_ATTRIBUTE; +uint32_t __sanitizer_unaligned_load32(void *addr) + ALIAS("__tsan_unaligned_read4") SANITIZER_INTERFACE_ATTRIBUTE; +uint64_t __sanitizer_unaligned_load64(void *addr) + ALIAS("__tsan_unaligned_read8") SANITIZER_INTERFACE_ATTRIBUTE; +void __sanitizer_unaligned_store16(void *addr, uint16_t v) + ALIAS("__tsan_unaligned_write2") SANITIZER_INTERFACE_ATTRIBUTE; +void __sanitizer_unaligned_store32(void *addr, uint32_t v) + ALIAS("__tsan_unaligned_write4") SANITIZER_INTERFACE_ATTRIBUTE; +void __sanitizer_unaligned_store64(void *addr, uint64_t v) + ALIAS("__tsan_unaligned_write8") SANITIZER_INTERFACE_ATTRIBUTE; } void __tsan_acquire(void *addr) { diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h index 7480fc893f2d..457fb55e0d2d 100644 --- a/lib/tsan/rtl/tsan_interface.h +++ b/lib/tsan/rtl/tsan_interface.h @@ -16,7 +16,7 @@ #ifndef TSAN_INTERFACE_H #define TSAN_INTERFACE_H -#include <sanitizer/common_interface_defs.h> +#include <sanitizer_common/sanitizer_internal_defs.h> // This header should NOT include any other headers. // All functions in this header are extern "C" and start with __tsan_. 
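The __sanitizer_unaligned_load/store declarations added to tsan_interface.cc above reuse the GCC/Clang alias attribute, so one definition is exported under a second symbol name with no wrapper call. A minimal standalone example of that mechanism (ELF targets only; names are illustrative):

#include <cstdio>

extern "C" void tool_impl() {       // the real definition
  puts("called");
}

// Second exported symbol bound to the same code.
extern "C" void public_name() __attribute__((alias("tool_impl")));

int main() {
  public_name();                    // prints "called"
}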
@@ -41,6 +41,14 @@ void __tsan_write4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; void __tsan_write8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; void __tsan_write16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +u16 __tsan_unaligned_read2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +u32 __tsan_unaligned_read4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +u64 __tsan_unaligned_read8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_unaligned_write2(void *addr, u16 v) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_unaligned_write4(void *addr, u32 v) SANITIZER_INTERFACE_ATTRIBUTE; +void __tsan_unaligned_write8(void *addr, u64 v) SANITIZER_INTERFACE_ATTRIBUTE; + +void __tsan_vptr_read(void **vptr_p) SANITIZER_INTERFACE_ATTRIBUTE; void __tsan_vptr_update(void **vptr_p, void *new_val) SANITIZER_INTERFACE_ATTRIBUTE; diff --git a/lib/tsan/rtl/tsan_interface_ann.cc b/lib/tsan/rtl/tsan_interface_ann.cc index 51ebbf2266dd..04b4b455d15e 100644 --- a/lib/tsan/rtl/tsan_interface_ann.cc +++ b/lib/tsan/rtl/tsan_interface_ann.cc @@ -20,6 +20,7 @@ #include "tsan_mman.h" #include "tsan_flags.h" #include "tsan_platform.h" +#include "tsan_vector.h" #define CALLERPC ((uptr)__builtin_return_address(0)) @@ -67,6 +68,7 @@ struct ExpectRace { ExpectRace *next; ExpectRace *prev; int hitcount; + int addcount; uptr addr; uptr size; char *file; @@ -91,16 +93,19 @@ static void AddExpectRace(ExpectRace *list, char *f, int l, uptr addr, uptr size, char *desc) { ExpectRace *race = list->next; for (; race != list; race = race->next) { - if (race->addr == addr && race->size == size) + if (race->addr == addr && race->size == size) { + race->addcount++; return; + } } race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace)); - race->hitcount = 0; race->addr = addr; race->size = size; race->file = f; race->line = l; race->desc[0] = 0; + race->hitcount = 0; + race->addcount = 1; if (desc) { int i = 0; for (; i < kMaxDescLen - 1 && desc[i]; i++) @@ -155,6 +160,68 @@ bool IsExpectedReport(uptr addr, uptr size) { return false; } +static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched, + int *unique_count, int *hit_count, int ExpectRace::*counter) { + ExpectRace *list = &dyn_ann_ctx->benign; + for (ExpectRace *race = list->next; race != list; race = race->next) { + (*unique_count)++; + if (race->*counter == 0) + continue; + (*hit_count) += race->*counter; + uptr i = 0; + for (; i < matched->Size(); i++) { + ExpectRace *race0 = &(*matched)[i]; + if (race->line == race0->line + && internal_strcmp(race->file, race0->file) == 0 + && internal_strcmp(race->desc, race0->desc) == 0) { + race0->*counter += race->*counter; + break; + } + } + if (i == matched->Size()) + matched->PushBack(*race); + } +} + +void PrintMatchedBenignRaces() { + Lock lock(&dyn_ann_ctx->mtx); + int unique_count = 0; + int hit_count = 0; + int add_count = 0; + Vector<ExpectRace> hit_matched(MBlockScopedBuf); + CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count, + &ExpectRace::hitcount); + Vector<ExpectRace> add_matched(MBlockScopedBuf); + CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count, + &ExpectRace::addcount); + if (hit_matched.Size()) { + Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n", + hit_count, (int)internal_getpid()); + for (uptr i = 0; i < hit_matched.Size(); i++) { + Printf("%d %s:%d %s\n", + hit_matched[i].hitcount, hit_matched[i].file, + hit_matched[i].line, hit_matched[i].desc); + } + } + if (hit_matched.Size()) { + Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique" + 
" (pid=%d):\n", + add_count, unique_count, (int)internal_getpid()); + for (uptr i = 0; i < add_matched.Size(); i++) { + Printf("%d %s:%d %s\n", + add_matched[i].addcount, add_matched[i].file, + add_matched[i].line, add_matched[i].desc); + } + } +} + +static void ReportMissedExpectedRace(ExpectRace *race) { + Printf("==================\n"); + Printf("WARNING: ThreadSanitizer: missed expected data race\n"); + Printf(" %s addr=%zx %s:%d\n", + race->desc, race->addr, race->file, race->line); + Printf("==================\n"); +} } // namespace __tsan using namespace __tsan; // NOLINT @@ -237,14 +304,6 @@ void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) { SCOPED_ANNOTATION(AnnotateNoOp); } -static void ReportMissedExpectedRace(ExpectRace *race) { - Printf("==================\n"); - Printf("WARNING: ThreadSanitizer: missed expected data race\n"); - Printf(" %s addr=%zx %s:%d\n", - race->desc, race->addr, race->file, race->line); - Printf("==================\n"); -} - void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) { SCOPED_ANNOTATION(AnnotateFlushExpectedRaces); Lock lock(&dyn_ann_ctx->mtx); @@ -357,6 +416,9 @@ void INTERFACE_ATTRIBUTE AnnotateThreadName( ThreadSetName(thr, name); } +// We deliberately omit the implementation of WTFAnnotateHappensBefore() and +// WTFAnnotateHappensAfter(). Those are being used by Webkit to annotate +// atomic operations, which should be handled by ThreadSanitizer correctly. void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) { SCOPED_ANNOTATION(AnnotateHappensBefore); } @@ -368,6 +430,7 @@ void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) { void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized( char *f, int l, uptr mem, uptr sz, char *desc) { SCOPED_ANNOTATION(AnnotateBenignRaceSized); + BenignRaceImpl(f, l, mem, 1, desc); } int INTERFACE_ATTRIBUTE RunningOnValgrind() { diff --git a/lib/tsan/rtl/tsan_interface_ann.h b/lib/tsan/rtl/tsan_interface_ann.h index ed809073327e..8e45328e7ec1 100644 --- a/lib/tsan/rtl/tsan_interface_ann.h +++ b/lib/tsan/rtl/tsan_interface_ann.h @@ -14,7 +14,7 @@ #ifndef TSAN_INTERFACE_ANN_H #define TSAN_INTERFACE_ANN_H -#include <sanitizer/common_interface_defs.h> +#include <sanitizer_common/sanitizer_internal_defs.h> // This header should NOT include any other headers. // All functions in this header are extern "C" and start with __tsan_. diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc index a9d75e5bf76c..80266969849a 100644 --- a/lib/tsan/rtl/tsan_interface_atomic.cc +++ b/lib/tsan/rtl/tsan_interface_atomic.cc @@ -20,25 +20,42 @@ // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/ #include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_stacktrace.h" #include "tsan_interface_atomic.h" #include "tsan_flags.h" #include "tsan_rtl.h" using namespace __tsan; // NOLINT +#define SCOPED_ATOMIC(func, ...) \ + const uptr callpc = (uptr)__builtin_return_address(0); \ + uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \ + pc = __sanitizer::StackTrace::GetPreviousInstructionPc(pc); \ + mo = ConvertOrder(mo); \ + mo = flags()->force_seq_cst_atomics ? 
(morder)mo_seq_cst : mo; \ + ThreadState *const thr = cur_thread(); \ + AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \ + ScopedAtomic sa(thr, callpc, __FUNCTION__); \ + return Atomic##func(thr, pc, __VA_ARGS__); \ +/**/ + class ScopedAtomic { public: ScopedAtomic(ThreadState *thr, uptr pc, const char *func) : thr_(thr) { - CHECK_EQ(thr_->in_rtl, 1); // 1 due to our own ScopedInRtl member. + CHECK_EQ(thr_->in_rtl, 0); + ProcessPendingSignals(thr); + FuncEntry(thr_, pc); DPrintf("#%d: %s\n", thr_->tid, func); + thr_->in_rtl++; } ~ScopedAtomic() { - CHECK_EQ(thr_->in_rtl, 1); + thr_->in_rtl--; + CHECK_EQ(thr_->in_rtl, 0); + FuncExit(thr_); } private: ThreadState *thr_; - ScopedInRtl in_rtl_; }; // Some shortcuts. @@ -212,16 +229,19 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) { } #endif -#define SCOPED_ATOMIC(func, ...) \ - mo = ConvertOrder(mo); \ - mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \ - ThreadState *const thr = cur_thread(); \ - ProcessPendingSignals(thr); \ - const uptr pc = (uptr)__builtin_return_address(0); \ - AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \ - ScopedAtomic sa(thr, pc, __FUNCTION__); \ - return Atomic##func(thr, pc, __VA_ARGS__); \ -/**/ +template<typename T> +static int SizeLog() { + if (sizeof(T) <= 1) + return kSizeLog1; + else if (sizeof(T) <= 2) + return kSizeLog2; + else if (sizeof(T) <= 4) + return kSizeLog4; + else + return kSizeLog8; + // For 16-byte atomics we also use 8-byte memory access, + // this leads to false negatives only in very obscure cases. +} template<typename T> static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, @@ -229,14 +249,17 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, CHECK(IsLoadOrder(mo)); // This fast-path is critical for performance. // Assume the access is atomic. - if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) + if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) { + MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>()); return *a; + } SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false); thr->clock.set(thr->tid, thr->fast_state.epoch()); thr->clock.acquire(&s->clock); T v = *a; s->mtx.ReadUnlock(); __sync_synchronize(); + MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>()); return v; } @@ -244,6 +267,7 @@ template<typename T> static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { CHECK(IsStoreOrder(mo)); + MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>()); // This fast-path is critical for performance. // Assume the access is atomic. 
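The SizeLog<T>() helper above folds an access size into the kSizeLog1/2/4/8 codes used by MemoryRead/MemoryWrite throughout the runtime. A compile-time sketch of the same mapping, checked on the usual LP64 type sizes:

#include <cstddef>

constexpr int SizeLog(size_t size) {
  return size <= 1 ? 0 :          // kSizeLog1
         size <= 2 ? 1 :          // kSizeLog2
         size <= 4 ? 2 :          // kSizeLog4
                     3;           // kSizeLog8 (16-byte atomics also map here)
}

static_assert(SizeLog(sizeof(char)) == 0, "");
static_assert(SizeLog(sizeof(short)) == 1, "");
static_assert(SizeLog(sizeof(int)) == 2, "");
static_assert(SizeLog(sizeof(long long)) == 3, "");

int main() { return 0; }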
// Strictly saying even relaxed store cuts off release sequence, @@ -265,16 +289,21 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, template<typename T, T (*F)(volatile T *v, T op)> static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { - SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); - thr->clock.set(thr->tid, thr->fast_state.epoch()); - if (IsAcqRelOrder(mo)) - thr->clock.acq_rel(&s->clock); - else if (IsReleaseOrder(mo)) - thr->clock.release(&s->clock); - else if (IsAcquireOrder(mo)) - thr->clock.acquire(&s->clock); + MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>()); + SyncVar *s = 0; + if (mo != mo_relaxed) { + s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); + thr->clock.set(thr->tid, thr->fast_state.epoch()); + if (IsAcqRelOrder(mo)) + thr->clock.acq_rel(&s->clock); + else if (IsReleaseOrder(mo)) + thr->clock.release(&s->clock); + else if (IsAcquireOrder(mo)) + thr->clock.acquire(&s->clock); + } v = F(a, v); - s->mtx.Unlock(); + if (s) + s->mtx.Unlock(); return v; } @@ -324,17 +353,22 @@ template<typename T> static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo, morder fmo) { (void)fmo; // Unused because llvm does not pass it yet. - SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); - thr->clock.set(thr->tid, thr->fast_state.epoch()); - if (IsAcqRelOrder(mo)) - thr->clock.acq_rel(&s->clock); - else if (IsReleaseOrder(mo)) - thr->clock.release(&s->clock); - else if (IsAcquireOrder(mo)) - thr->clock.acquire(&s->clock); + MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>()); + SyncVar *s = 0; + if (mo != mo_relaxed) { + s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true); + thr->clock.set(thr->tid, thr->fast_state.epoch()); + if (IsAcqRelOrder(mo)) + thr->clock.acq_rel(&s->clock); + else if (IsReleaseOrder(mo)) + thr->clock.release(&s->clock); + else if (IsAcquireOrder(mo)) + thr->clock.acquire(&s->clock); + } T cc = *c; T pr = func_cas(a, cc, v); - s->mtx.Unlock(); + if (s) + s->mtx.Unlock(); if (pr == cc) return true; *c = pr; diff --git a/lib/tsan/rtl/tsan_interface_inl.h b/lib/tsan/rtl/tsan_interface_inl.h index 8a92155d57ef..0187e49d96e5 100644 --- a/lib/tsan/rtl/tsan_interface_inl.h +++ b/lib/tsan/rtl/tsan_interface_inl.h @@ -19,41 +19,53 @@ using namespace __tsan; // NOLINT void __tsan_read1(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 0); + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1); } void __tsan_read2(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 0); + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2); } void __tsan_read4(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 0); + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4); } void __tsan_read8(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 0); + MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); } void __tsan_write1(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 1); + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1); } void __tsan_write2(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 1); + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2); } void __tsan_write4(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 1); + MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4); } void __tsan_write8(void *addr) { - MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 1); + 
MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); } void __tsan_vptr_update(void **vptr_p, void *new_val) { CHECK_EQ(sizeof(vptr_p), 8); - if (*vptr_p != new_val) - MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, 3, 1); + if (*vptr_p != new_val) { + ThreadState *thr = cur_thread(); + thr->is_vptr_access = true; + MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8); + thr->is_vptr_access = false; + } +} + +void __tsan_vptr_read(void **vptr_p) { + CHECK_EQ(sizeof(vptr_p), 8); + ThreadState *thr = cur_thread(); + thr->is_vptr_access = true; + MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8); + thr->is_vptr_access = false; } void __tsan_func_entry(void *pc) { diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc index e425c75800be..71e0747c3646 100644 --- a/lib/tsan/rtl/tsan_interface_java.cc +++ b/lib/tsan/rtl/tsan_interface_java.cc @@ -152,7 +152,7 @@ SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) { return 0; } -} // namespace __tsan { +} // namespace __tsan #define SCOPED_JAVA_FUNC(func) \ ThreadState *thr = cur_thread(); \ @@ -271,6 +271,7 @@ void __tsan_java_mutex_lock(jptr addr) { CHECK_GE(addr, jctx->heap_begin); CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + MutexCreate(thr, pc, addr, true, true, true); MutexLock(thr, pc, addr); } @@ -291,6 +292,7 @@ void __tsan_java_mutex_read_lock(jptr addr) { CHECK_GE(addr, jctx->heap_begin); CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + MutexCreate(thr, pc, addr, true, true, true); MutexReadLock(thr, pc, addr); } @@ -303,3 +305,25 @@ void __tsan_java_mutex_read_unlock(jptr addr) { MutexReadUnlock(thr, pc, addr); } + +void __tsan_java_mutex_lock_rec(jptr addr, int rec) { + SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec); + DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + CHECK_GT(rec, 0); + + MutexCreate(thr, pc, addr, true, true, true); + MutexLock(thr, pc, addr, rec); +} + +int __tsan_java_mutex_unlock_rec(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec); + DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + return MutexUnlock(thr, pc, addr, true); +} diff --git a/lib/tsan/rtl/tsan_interface_java.h b/lib/tsan/rtl/tsan_interface_java.h index 241483aaa015..9ac78e074bbe 100644 --- a/lib/tsan/rtl/tsan_interface_java.h +++ b/lib/tsan/rtl/tsan_interface_java.h @@ -55,8 +55,7 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE; // Mutex lock. // Addr is any unique address associated with the mutex. -// Must not be called on recursive reentry. -// Object.wait() is handled as a pair of unlock/lock. +// Can be called on recursive reentry. void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE; // Mutex unlock. void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE; @@ -64,6 +63,16 @@ void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE; void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE; // Mutex read unlock. void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE; +// Recursive mutex lock, intended for handling of Object.wait(). +// The 'rec' value must be obtained from the previous +// __tsan_java_mutex_unlock_rec(). 
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE; +// Recursive mutex unlock, intended for handling of Object.wait(). +// The return value says how many times this thread called lock() +// w/o a pairing unlock() (i.e. how many recursive levels it unlocked). +// It must be passed back to __tsan_java_mutex_lock_rec() to restore +// the same recursion level. +int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE; #ifdef __cplusplus } // extern "C" diff --git a/lib/tsan/rtl/tsan_md5.cc b/lib/tsan/rtl/tsan_md5.cc index c9d671f5b599..66e824043153 100644 --- a/lib/tsan/rtl/tsan_md5.cc +++ b/lib/tsan/rtl/tsan_md5.cc @@ -242,4 +242,4 @@ MD5Hash md5_hash(const void *data, uptr size) { MD5_Final((unsigned char*)&res.hash[0], &ctx); return res; } -} +} // namespace __tsan diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc index 82f7105d60db..b6671b1abf09 100644 --- a/lib/tsan/rtl/tsan_mman.cc +++ b/lib/tsan/rtl/tsan_mman.cc @@ -29,6 +29,41 @@ extern "C" void WEAK __tsan_free_hook(void *ptr) { namespace __tsan { +COMPILER_CHECK(sizeof(MBlock) == 16); + +void MBlock::Lock() { + atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this); + uptr v = atomic_load(a, memory_order_relaxed); + for (int iter = 0;; iter++) { + if (v & 1) { + if (iter < 10) + proc_yield(20); + else + internal_sched_yield(); + v = atomic_load(a, memory_order_relaxed); + continue; + } + if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire)) + break; + } +} + +void MBlock::Unlock() { + atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this); + uptr v = atomic_load(a, memory_order_relaxed); + DCHECK(v & 1); + atomic_store(a, v & ~1, memory_order_relaxed); +} + +struct MapUnmapCallback { + void OnMap(uptr p, uptr size) const { } + void OnUnmap(uptr p, uptr size) const { + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. 
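MBlock::Lock()/Unlock() above pack a spinlock into the lowest bit of the block header's first word, so the 16-byte header needs no separate mutex. A sketch of the same trick with std::atomic (layout and names are illustrative, and the real code yields to the scheduler while spinning):

#include <atomic>
#include <cassert>
#include <cstdint>

struct PackedHeader {
  std::atomic<uintptr_t> word{0};   // bit 0: lock, remaining bits: payload

  void Lock() {
    uintptr_t v = word.load(std::memory_order_relaxed);
    for (;;) {
      if (v & 1) {                                  // already locked, retry
        v = word.load(std::memory_order_relaxed);
        continue;
      }
      if (word.compare_exchange_weak(v, v | 1, std::memory_order_acquire))
        return;
    }
  }

  void Unlock() {
    uintptr_t v = word.load(std::memory_order_relaxed);
    assert(v & 1);
    word.store(v & ~uintptr_t(1), std::memory_order_release);
  }
};

int main() {
  PackedHeader h;
  h.word.store(2, std::memory_order_relaxed);       // payload bits, unlocked
  h.Lock();
  h.Unlock();
}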
+ DontNeedShadowFor(p, size); + } +}; + static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64); Allocator *allocator() { return reinterpret_cast<Allocator*>(&allocator_placeholder); @@ -38,8 +73,16 @@ void InitializeAllocator() { allocator()->Init(); } -void AlloctorThreadFinish(ThreadState *thr) { - allocator()->SwallowCache(&thr->alloc_cache); +void AllocatorThreadStart(ThreadState *thr) { + allocator()->InitCache(&thr->alloc_cache); +} + +void AllocatorThreadFinish(ThreadState *thr) { + allocator()->DestroyCache(&thr->alloc_cache); +} + +void AllocatorPrintStats() { + allocator()->PrintStats(); } static void SignalUnsafeCall(ThreadState *thr, uptr pc) { @@ -48,7 +91,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { Context *ctx = CTX(); StackTrace stack; stack.ObtainCurrent(thr, pc); - Lock l(&ctx->thread_mtx); + ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeSignalUnsafe); if (!IsFiredSuppression(ctx, rep, stack)) { rep.AddStack(&stack); @@ -58,17 +101,15 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) { CHECK_GT(thr->in_rtl, 0); + if ((sz >= (1ull << 40)) || (align >= (1ull << 40))) + return 0; void *p = allocator()->Allocate(&thr->alloc_cache, sz, align); if (p == 0) return 0; MBlock *b = new(allocator()->GetMetaData(p)) MBlock; - b->size = sz; - b->head = 0; - b->alloc_tid = thr->unique_id; - b->alloc_stack_id = CurrentStackId(thr, pc); - if (CTX() && CTX()->initialized) { + b->Init(sz, thr->tid, CurrentStackId(thr, pc)); + if (CTX() && CTX()->initialized) MemoryRangeImitateWrite(thr, pc, (uptr)p, sz); - } DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p); SignalUnsafeCall(thr, pc); return p; @@ -79,9 +120,9 @@ void user_free(ThreadState *thr, uptr pc, void *p) { CHECK_NE(p, (void*)0); DPrintf("#%d: free(%p)\n", thr->tid, p); MBlock *b = (MBlock*)allocator()->GetMetaData(p); - if (b->head) { - Lock l(&b->mtx); - for (SyncVar *s = b->head; s;) { + if (b->ListHead()) { + MBlock::ScopedLock l(b); + for (SyncVar *s = b->ListHead(); s;) { SyncVar *res = s; s = s->next; StatInc(thr, StatSyncDestroyed); @@ -89,12 +130,10 @@ void user_free(ThreadState *thr, uptr pc, void *p) { res->mtx.Unlock(); DestroyAndFree(res); } - b->head = 0; + b->ListReset(); } - if (CTX() && CTX()->initialized && thr->in_rtl == 1) { - MemoryRangeFreed(thr, pc, (uptr)p, b->size); - } - b->~MBlock(); + if (CTX() && CTX()->initialized && thr->in_rtl == 1) + MemoryRangeFreed(thr, pc, (uptr)p, b->Size()); allocator()->Deallocate(&thr->alloc_cache, p); SignalUnsafeCall(thr, pc); } @@ -110,20 +149,29 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) { return 0; if (p) { MBlock *b = user_mblock(thr, p); - internal_memcpy(p2, p, min(b->size, sz)); + CHECK_NE(b, 0); + internal_memcpy(p2, p, min(b->Size(), sz)); } } - if (p) { + if (p) user_free(thr, pc, p); - } return p2; } +uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) { + CHECK_GT(thr->in_rtl, 0); + if (p == 0) + return 0; + MBlock *b = (MBlock*)allocator()->GetMetaData(p); + return b ? 
b->Size() : 0; +} + MBlock *user_mblock(ThreadState *thr, void *p) { - CHECK_NE(p, (void*)0); + CHECK_NE(p, 0); Allocator *a = allocator(); void *b = a->GetBlockBegin(p); - CHECK_NE(b, 0); + if (b == 0) + return 0; return (MBlock*)a->GetMetaData(b); } @@ -164,3 +212,54 @@ void internal_free(void *p) { } } // namespace __tsan + +using namespace __tsan; + +extern "C" { +uptr __tsan_get_current_allocated_bytes() { + u64 stats[AllocatorStatCount]; + allocator()->GetStats(stats); + u64 m = stats[AllocatorStatMalloced]; + u64 f = stats[AllocatorStatFreed]; + return m >= f ? m - f : 1; +} + +uptr __tsan_get_heap_size() { + u64 stats[AllocatorStatCount]; + allocator()->GetStats(stats); + u64 m = stats[AllocatorStatMmapped]; + u64 f = stats[AllocatorStatUnmapped]; + return m >= f ? m - f : 1; +} + +uptr __tsan_get_free_bytes() { + return 1; +} + +uptr __tsan_get_unmapped_bytes() { + return 1; +} + +uptr __tsan_get_estimated_allocated_size(uptr size) { + return size; +} + +bool __tsan_get_ownership(void *p) { + return allocator()->GetBlockBegin(p) != 0; +} + +uptr __tsan_get_allocated_size(void *p) { + if (p == 0) + return 0; + p = allocator()->GetBlockBegin(p); + if (p == 0) + return 0; + MBlock *b = (MBlock*)allocator()->GetMetaData(p); + return b->Size(); +} + +void __tsan_on_thread_idle() { + ThreadState *thr = cur_thread(); + allocator()->SwallowCache(&thr->alloc_cache); +} +} // extern "C" diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h index 5cf00eac8d03..19d555437f3e 100644 --- a/lib/tsan/rtl/tsan_mman.h +++ b/lib/tsan/rtl/tsan_mman.h @@ -20,7 +20,9 @@ namespace __tsan { const uptr kDefaultAlignment = 16; void InitializeAllocator(); -void AlloctorThreadFinish(ThreadState *thr); +void AllocatorThreadStart(ThreadState *thr); +void AllocatorThreadFinish(ThreadState *thr); +void AllocatorPrintStats(); // For user allocations. void *user_alloc(ThreadState *thr, uptr pc, uptr sz, @@ -29,6 +31,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, void user_free(ThreadState *thr, uptr pc, void *p); void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz); void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align); +uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p); // Given the pointer p into a valid allocated block, // returns the descriptor of the block. MBlock *user_mblock(ThreadState *thr, void *p); @@ -60,6 +63,7 @@ enum MBlockType { MBlockExpectRace, MBlockSignal, MBlockFD, + MBlockJmpBuf, // This must be the last. MBlockTypeCount diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc index 335ca2211d13..a92fd90fd9c1 100644 --- a/lib/tsan/rtl/tsan_mutex.cc +++ b/lib/tsan/rtl/tsan_mutex.cc @@ -31,8 +31,8 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = { /*0 MutexTypeInvalid*/ {}, /*1 MutexTypeTrace*/ {MutexTypeLeaf}, /*2 MutexTypeThreads*/ {MutexTypeReport}, - /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeMBlock, - MutexTypeJavaMBlock}, + /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar, + MutexTypeMBlock, MutexTypeJavaMBlock}, /*4 MutexTypeSyncVar*/ {}, /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar}, /*6 MutexTypeSlab*/ {MutexTypeLeaf}, diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h index 09223ff6cc48..eebfd4d70a14 100644 --- a/lib/tsan/rtl/tsan_mutexset.h +++ b/lib/tsan/rtl/tsan_mutexset.h @@ -22,7 +22,7 @@ class MutexSet { public: // Holds limited number of mutexes. // The oldest mutexes are discarded on overflow. 
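The CanLockTab change above extends the runtime's static lock-order table: each row lists which mutex types may be acquired while the row's type is already held, and violations are caught at lock time. A much-simplified sketch of that kind of check (types and table are illustrative, not the real CanLockTab):

#include <cassert>

enum MutexType { kTypeInvalid, kTypeTrace, kTypeThreads, kTypeReport, kTypeCount };

// CanLock[held][acquired]: may a mutex of type 'acquired' be taken while a
// mutex of type 'held' is owned?
static const bool CanLock[kTypeCount][kTypeCount] = {
  /* kTypeInvalid */ {},
  /* kTypeTrace   */ {},
  /* kTypeThreads */ {false, false, false, true},   // Threads -> Report is ok
  /* kTypeReport  */ {},
};

static void OnLock(const int *held, int n_held, int acquired) {
  for (int i = 0; i < n_held; i++)
    assert(CanLock[held[i]][acquired] && "lock order violation");
}

int main() {
  int held[] = {kTypeThreads};
  OnLock(held, 1, kTypeReport);     // allowed by the table
}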
- static const uptr kMaxSize = 64; + static const uptr kMaxSize = 16; struct Desc { u64 id; u64 epoch; diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h index c859c3e85b19..666b4d0c482f 100644 --- a/lib/tsan/rtl/tsan_platform.h +++ b/lib/tsan/rtl/tsan_platform.h @@ -37,9 +37,9 @@ C++ COMPAT linux memory layout: Go linux and darwin memory layout: 0000 0000 0000 - 0000 1000 0000: executable 0000 1000 0000 - 00f8 0000 0000: - -00f8 0000 0000 - 0118 0000 0000: heap -0118 0000 0000 - 1000 0000 0000: - -1000 0000 0000 - 1460 0000 0000: shadow +00c0 0000 0000 - 00e0 0000 0000: heap +00e0 0000 0000 - 1000 0000 0000: - +1000 0000 0000 - 1380 0000 0000: shadow 1460 0000 0000 - 6000 0000 0000: - 6000 0000 0000 - 6200 0000 0000: traces 6200 0000 0000 - 7fff ffff ffff: - @@ -47,8 +47,8 @@ Go linux and darwin memory layout: Go windows memory layout: 0000 0000 0000 - 0000 1000 0000: executable 0000 1000 0000 - 00f8 0000 0000: - -00f8 0000 0000 - 0118 0000 0000: heap -0118 0000 0000 - 0100 0000 0000: - +00c0 0000 0000 - 00e0 0000 0000: heap +00e0 0000 0000 - 0100 0000 0000: - 0100 0000 0000 - 0560 0000 0000: shadow 0560 0000 0000 - 0760 0000 0000: traces 0760 0000 0000 - 07ff ffff ffff: - @@ -65,11 +65,11 @@ namespace __tsan { #if defined(TSAN_GO) static const uptr kLinuxAppMemBeg = 0x000000000000ULL; -static const uptr kLinuxAppMemEnd = 0x00fcffffffffULL; -# if defined(_WIN32) +static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL; +# if SANITIZER_WINDOWS static const uptr kLinuxShadowMsk = 0x010000000000ULL; # else -static const uptr kLinuxShadowMsk = 0x100000000000ULL; +static const uptr kLinuxShadowMsk = 0x200000000000ULL; # endif // TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout, // when memory addresses are of the 0x2axxxxxxxxxx form. @@ -84,7 +84,7 @@ static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL; static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL; -#if defined(_WIN32) +#if SANITIZER_WINDOWS const uptr kTraceMemBegin = 0x056000000000ULL; #else const uptr kTraceMemBegin = 0x600000000000ULL; @@ -132,13 +132,19 @@ static inline uptr AlternativeAddress(uptr addr) { #endif } -uptr GetShadowMemoryConsumption(); void FlushShadowMemory(); +void WriteMemoryProfile(char *buf, uptr buf_size); const char *InitializePlatform(); void FinalizePlatform(); -uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) { - uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event); +uptr ALWAYS_INLINE GetThreadTrace(int tid) { + uptr p = kTraceMemBegin + (uptr)(tid * 2) * kTraceSize * sizeof(Event); + DCHECK_LT(p, kTraceMemBegin + kTraceMemSize); + return p; +} + +uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) { + uptr p = kTraceMemBegin + (uptr)(tid * 2 + 1) * kTraceSize * sizeof(Event); DCHECK_LT(p, kTraceMemBegin + kTraceMemSize); return p; } @@ -148,9 +154,7 @@ void internal_start_thread(void(*func)(void*), void *arg); // Says whether the addr relates to a global var. // Guesses with high probability, may yield both false positives and negatives. bool IsGlobalVar(uptr addr); -uptr GetTlsSize(); -void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, - uptr *tls_addr, uptr *tls_size); +int ExtractResolvFDs(void *state, int *fds, int nfd); } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc index 6cc424975125..a0d71e8589d6 100644 --- a/lib/tsan/rtl/tsan_platform_linux.cc +++ b/lib/tsan/rtl/tsan_platform_linux.cc @@ -12,7 +12,9 @@ // Linux-specific code. 
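GetThreadTrace()/GetThreadTraceHeader() above now interleave two slots per thread id in the trace region: even slots hold the event ring, odd slots its header. A tiny model of that layout; the base address matches the non-Windows kTraceMemBegin in the diff, the slot size is purely illustrative:

#include <cassert>
#include <cstdint>

const uint64_t kTraceMemBegin = 0x600000000000ull;
const uint64_t kSlotSize      = 0x100000ull;        // bytes per slot (made up)

uint64_t ThreadTrace(int tid)       { return kTraceMemBegin + uint64_t(tid * 2) * kSlotSize; }
uint64_t ThreadTraceHeader(int tid) { return kTraceMemBegin + uint64_t(tid * 2 + 1) * kSlotSize; }

int main() {
  // Adjacent, non-overlapping slots for each thread id.
  assert(ThreadTraceHeader(0) == ThreadTrace(0) + kSlotSize);
  assert(ThreadTrace(1) == ThreadTraceHeader(0) + kSlotSize);
}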
//===----------------------------------------------------------------------===// -#ifdef __linux__ + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" @@ -21,7 +23,6 @@ #include "tsan_rtl.h" #include "tsan_flags.h" -#include <asm/prctl.h> #include <fcntl.h> #include <pthread.h> #include <signal.h> @@ -40,11 +41,16 @@ #include <errno.h> #include <sched.h> #include <dlfcn.h> +#define __need_res_state +#include <resolv.h> +#include <malloc.h> -extern "C" int arch_prctl(int code, __sanitizer::uptr *addr); +extern "C" struct mallinfo __libc_mallinfo(); namespace __tsan { +const uptr kPageSize = 4096; + #ifndef TSAN_GO ScopedInRtl::ScopedInRtl() : thr_(cur_thread()) { @@ -66,8 +72,75 @@ ScopedInRtl::~ScopedInRtl() { } #endif -uptr GetShadowMemoryConsumption() { - return 0; +static bool ishex(char c) { + return (c >= '0' && c <= '9') + || (c >= 'a' && c <= 'f'); +} + +static uptr readhex(const char *p) { + uptr v = 0; + for (; ishex(p[0]); p++) { + if (p[0] >= '0' && p[0] <= '9') + v = v * 16 + p[0] - '0'; + else + v = v * 16 + p[0] - 'a' + 10; + } + return v; +} + +static uptr readdec(const char *p) { + uptr v = 0; + for (; p[0] >= '0' && p[0] <= '9' ; p++) + v = v * 10 + p[0] - '0'; + return v; +} + +void WriteMemoryProfile(char *buf, uptr buf_size) { + char *smaps = 0; + uptr smaps_cap = 0; + uptr smaps_len = ReadFileToBuffer("/proc/self/smaps", + &smaps, &smaps_cap, 64<<20); + uptr mem[6] = {}; + uptr total = 0; + uptr start = 0; + bool file = false; + const char *pos = smaps; + while (pos < smaps + smaps_len) { + if (ishex(pos[0])) { + start = readhex(pos); + for (; *pos != '/' && *pos > '\n'; pos++) {} + file = *pos == '/'; + } else if (internal_strncmp(pos, "Rss:", 4) == 0) { + for (; *pos < '0' || *pos > '9'; pos++) {} + uptr rss = readdec(pos) * 1024; + total += rss; + start >>= 40; + if (start < 0x10) // shadow + mem[0] += rss; + else if (start >= 0x20 && start < 0x30) // compat modules + mem[file ? 1 : 2] += rss; + else if (start >= 0x7e) // modules + mem[file ? 1 : 2] += rss; + else if (start >= 0x60 && start < 0x62) // traces + mem[3] += rss; + else if (start >= 0x7d && start < 0x7e) // heap + mem[4] += rss; + else // other + mem[5] += rss; + } + while (*pos++ != '\n') {} + } + UnmapOrDie(smaps, smaps_cap); + char *buf_pos = buf; + char *buf_end = buf + buf_size; + buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos, + "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n", + total >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20, + mem[3] >> 20, mem[4] >> 20, mem[5] >> 20); + struct mallinfo mi = __libc_mallinfo(); + buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos, + "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n", + mi.arena >> 20, mi.hblkhd >> 20, mi.fordblks >> 20, mi.keepcost >> 20); } void FlushShadowMemory() { @@ -89,6 +162,63 @@ static void ProtectRange(uptr beg, uptr end) { #endif #ifndef TSAN_GO +// Mark shadow for .rodata sections with the special kShadowRodata marker. +// Accesses to .rodata can't race, so this saves time, memory and trace space. +static void MapRodata() { + // First create temp file. 
+ const char *tmpdir = GetEnv("TMPDIR"); + if (tmpdir == 0) + tmpdir = GetEnv("TEST_TMPDIR"); +#ifdef P_tmpdir + if (tmpdir == 0) + tmpdir = P_tmpdir; +#endif + if (tmpdir == 0) + return; + char filename[256]; + internal_snprintf(filename, sizeof(filename), "%s/tsan.rodata.%d", + tmpdir, (int)internal_getpid()); + uptr openrv = internal_open(filename, O_RDWR | O_CREAT | O_EXCL, 0600); + if (internal_iserror(openrv)) + return; + fd_t fd = openrv; + // Fill the file with kShadowRodata. + const uptr kMarkerSize = 512 * 1024 / sizeof(u64); + InternalScopedBuffer<u64> marker(kMarkerSize); + for (u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++) + *p = kShadowRodata; + internal_write(fd, marker.data(), marker.size()); + // Map the file into memory. + uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, fd, 0); + if (internal_iserror(page)) { + internal_close(fd); + internal_unlink(filename); + return; + } + // Map the file into shadow of .rodata sections. + MemoryMappingLayout proc_maps(/*cache_enabled*/true); + uptr start, end, offset, prot; + char name[128]; + while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) { + if (name[0] != 0 && name[0] != '[' + && (prot & MemoryMappingLayout::kProtectionRead) + && (prot & MemoryMappingLayout::kProtectionExecute) + && !(prot & MemoryMappingLayout::kProtectionWrite) + && IsAppMem(start)) { + // Assume it's .rodata + char *shadow_start = (char*)MemToShadow(start); + char *shadow_end = (char*)MemToShadow(end); + for (char *p = shadow_start; p < shadow_end; p += marker.size()) { + internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p), + PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0); + } + } + } + internal_close(fd); + internal_unlink(filename); +} + void InitializeShadowMemory() { uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg); @@ -115,6 +245,8 @@ void InitializeShadowMemory() { kLinuxAppMemBeg, kLinuxAppMemEnd, (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30); DPrintf("stack %zx\n", (uptr)&shadow); + + MapRodata(); } #endif @@ -124,10 +256,11 @@ static uptr g_data_end; #ifndef TSAN_GO static void CheckPIE() { // Ensure that the binary is indeed compiled with -pie. - MemoryMappingLayout proc_maps; + MemoryMappingLayout proc_maps(true); uptr start, end; if (proc_maps.Next(&start, &end, - /*offset*/0, /*filename*/0, /*filename_size*/0)) { + /*offset*/0, /*filename*/0, /*filename_size*/0, + /*protection*/0)) { if ((u64)start < kLinuxAppMemBeg) { Printf("FATAL: ThreadSanitizer can not mmap the shadow memory (" "something is mapped at 0x%zx < 0x%zx)\n", @@ -140,11 +273,12 @@ static void CheckPIE() { } static void InitDataSeg() { - MemoryMappingLayout proc_maps; + MemoryMappingLayout proc_maps(true); uptr start, end, offset; char name[128]; bool prev_is_data = false; - while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name))) { + while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), + /*protection*/ 0)) { DPrintf("%p-%p %p %s\n", start, end, offset, name); bool is_data = offset != 0 && name[0] != 0; // BSS may get merged with [heap] in /proc/self/maps. 
This is not very @@ -163,27 +297,6 @@ static void InitDataSeg() { CHECK_LT((uptr)&g_data_start, g_data_end); } -static uptr g_tls_size; - -#ifdef __i386__ -# define INTERNAL_FUNCTION __attribute__((regparm(3), stdcall)) -#else -# define INTERNAL_FUNCTION -#endif - -static int InitTlsSize() { - typedef void (*get_tls_func)(size_t*, size_t*) INTERNAL_FUNCTION; - get_tls_func get_tls; - void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); - CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr)); - internal_memcpy(&get_tls, &get_tls_static_info_ptr, - sizeof(get_tls_static_info_ptr)); - CHECK_NE(get_tls, 0); - size_t tls_size = 0; - size_t tls_align = 0; - get_tls(&tls_size, &tls_align); - return tls_size; -} #endif // #ifndef TSAN_GO static rlim_t getlim(int res) { @@ -238,57 +351,29 @@ const char *InitializePlatform() { #ifndef TSAN_GO CheckPIE(); - g_tls_size = (uptr)InitTlsSize(); + InitTlsSize(); InitDataSeg(); #endif - return getenv(kTsanOptionsEnv); -} - -void FinalizePlatform() { - fflush(0); + return GetEnv(kTsanOptionsEnv); } -uptr GetTlsSize() { -#ifndef TSAN_GO - return g_tls_size; -#else - return 0; -#endif +bool IsGlobalVar(uptr addr) { + return g_data_start && addr >= g_data_start && addr < g_data_end; } -void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, - uptr *tls_addr, uptr *tls_size) { #ifndef TSAN_GO - arch_prctl(ARCH_GET_FS, tls_addr); - *tls_addr -= g_tls_size; - *tls_size = g_tls_size; - - uptr stack_top, stack_bottom; - GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); - *stk_addr = stack_bottom; - *stk_size = stack_top - stack_bottom; - - if (!main) { - // If stack and tls intersect, make them non-intersecting. - if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) { - CHECK_GT(*tls_addr + *tls_size, *stk_addr); - CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size); - *stk_size -= *tls_size; - *tls_addr = *stk_addr + *stk_size; - } +int ExtractResolvFDs(void *state, int *fds, int nfd) { + int cnt = 0; + __res_state *statp = (__res_state*)state; + for (int i = 0; i < MAXNS && cnt < nfd; i++) { + if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1) + fds[cnt++] = statp->_u._ext.nssocks[i]; } -#else - *stk_addr = 0; - *stk_size = 0; - *tls_addr = 0; - *tls_size = 0; -#endif + return cnt; } +#endif -bool IsGlobalVar(uptr addr) { - return g_data_start && addr >= g_data_start && addr < g_data_end; -} } // namespace __tsan -#endif // #ifdef __linux__ +#endif // SANITIZER_LINUX diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc index 183061d14638..99d4533a4fa2 100644 --- a/lib/tsan/rtl/tsan_platform_mac.cc +++ b/lib/tsan/rtl/tsan_platform_mac.cc @@ -12,7 +12,8 @@ // Mac-specific code. 
//===----------------------------------------------------------------------===// -#ifdef __APPLE__ +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_MAC #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" @@ -82,25 +83,13 @@ const char *InitializePlatform() { setrlimit(RLIMIT_CORE, (rlimit*)&lim); } - return getenv(kTsanOptionsEnv); + return GetEnv(kTsanOptionsEnv); } void FinalizePlatform() { fflush(0); } -uptr GetTlsSize() { - return 0; -} - -void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, - uptr *tls_addr, uptr *tls_size) { - *stk_addr = 0; - *stk_size = 0; - *tls_addr = 0; - *tls_size = 0; -} - } // namespace __tsan -#endif // #ifdef __APPLE__ +#endif // SANITIZER_MAC diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc index f23e84e7875d..711db72ce684 100644 --- a/lib/tsan/rtl/tsan_platform_windows.cc +++ b/lib/tsan/rtl/tsan_platform_windows.cc @@ -12,7 +12,8 @@ // Windows-specific code. //===----------------------------------------------------------------------===// -#ifdef _WIN32 +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_WINDOWS #include "tsan_platform.h" @@ -34,25 +35,13 @@ void FlushShadowMemory() { } const char *InitializePlatform() { - return getenv(kTsanOptionsEnv); + return GetEnv(kTsanOptionsEnv); } void FinalizePlatform() { fflush(0); } -uptr GetTlsSize() { - return 0; -} - -void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, - uptr *tls_addr, uptr *tls_size) { - *stk_addr = 0; - *stk_size = 0; - *tls_addr = 0; - *tls_size = 0; -} - } // namespace __tsan -#endif // #ifdef _WIN32 +#endif // SANITIZER_WINDOWS diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc index 056dc97387b9..c95c5c86be69 100644 --- a/lib/tsan/rtl/tsan_report.cc +++ b/lib/tsan/rtl/tsan_report.cc @@ -22,7 +22,8 @@ ReportDesc::ReportDesc() , locs(MBlockReportLoc) , mutexes(MBlockReportMutex) , threads(MBlockReportThread) - , sleep() { + , sleep() + , count() { } ReportMop::ReportMop() @@ -43,23 +44,22 @@ const char *thread_name(char *buf, int tid) { return buf; } -static void PrintHeader(ReportType typ) { - Printf("WARNING: ThreadSanitizer: "); - +static const char *ReportTypeString(ReportType typ) { if (typ == ReportTypeRace) - Printf("data race"); - else if (typ == ReportTypeUseAfterFree) - Printf("heap-use-after-free"); - else if (typ == ReportTypeThreadLeak) - Printf("thread leak"); - else if (typ == ReportTypeMutexDestroyLocked) - Printf("destroy of a locked mutex"); - else if (typ == ReportTypeSignalUnsafe) - Printf("signal-unsafe call inside of a signal"); - else if (typ == ReportTypeErrnoInSignal) - Printf("signal handler spoils errno"); - - Printf(" (pid=%d)\n", GetPid()); + return "data race"; + if (typ == ReportTypeVptrRace) + return "data race on vptr (ctor/dtor vs virtual call)"; + if (typ == ReportTypeUseAfterFree) + return "heap-use-after-free"; + if (typ == ReportTypeThreadLeak) + return "thread leak"; + if (typ == ReportTypeMutexDestroyLocked) + return "destroy of a locked mutex"; + if (typ == ReportTypeSignalUnsafe) + return "signal-unsafe call inside of a signal"; + if (typ == ReportTypeErrnoInSignal) + return "signal handler spoils errno"; + return ""; } void PrintStack(const ReportStack *ent) { @@ -89,11 +89,17 @@ static void PrintMutexSet(Vector<ReportMopMutex> const& mset) { } } +static const char *MopDesc(bool first, bool write, bool atomic) { + return atomic ? (first ? (write ? 
"Atomic write" : "Atomic read") + : (write ? "Previous atomic write" : "Previous atomic read")) + : (first ? (write ? "Write" : "Read") + : (write ? "Previous write" : "Previous read")); +} + static void PrintMop(const ReportMop *mop, bool first) { char thrbuf[kThreadBufSize]; Printf(" %s of size %d at %p by %s", - (first ? (mop->write ? "Write" : "Read") - : (mop->write ? "Previous write" : "Previous read")), + MopDesc(first, mop->write, mop->atomic), mop->size, (void*)mop->addr, thread_name(thrbuf, mop->tid)); PrintMutexSet(mop->mset); @@ -135,7 +141,7 @@ static void PrintThread(const ReportThread *rt) { if (rt->id == 0) // Little sense in describing the main thread. return; Printf(" Thread T%d", rt->id); - if (rt->name) + if (rt->name && rt->name[0] != '\0') Printf(" '%s'", rt->name); char thrbuf[kThreadBufSize]; Printf(" (tid=%zu, %s) created by %s", @@ -152,9 +158,29 @@ static void PrintSleep(const ReportStack *s) { PrintStack(s); } +static ReportStack *ChooseSummaryStack(const ReportDesc *rep) { + if (rep->mops.Size()) + return rep->mops[0]->stack; + if (rep->stacks.Size()) + return rep->stacks[0]; + if (rep->mutexes.Size()) + return rep->mutexes[0]->stack; + if (rep->threads.Size()) + return rep->threads[0]->stack; + return 0; +} + +ReportStack *SkipTsanInternalFrames(ReportStack *ent) { + while (FrameIsInternal(ent) && ent->next) + ent = ent->next; + return ent; +} + void PrintReport(const ReportDesc *rep) { Printf("==================\n"); - PrintHeader(rep->typ); + const char *rep_typ_str = ReportTypeString(rep->typ); + Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str, + (int)internal_getpid()); for (uptr i = 0; i < rep->stacks.Size(); i++) { if (i) @@ -177,6 +203,12 @@ void PrintReport(const ReportDesc *rep) { for (uptr i = 0; i < rep->threads.Size(); i++) PrintThread(rep->threads[i]); + if (rep->typ == ReportTypeThreadLeak && rep->count > 1) + Printf(" And %d more similar thread leaks.\n\n", rep->count - 1); + + if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep))) + ReportErrorSummary(rep_typ_str, ent->file, ent->line, ent->func); + Printf("==================\n"); } diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h index f6715d1aae9b..b2ce0dd66a27 100644 --- a/lib/tsan/rtl/tsan_report.h +++ b/lib/tsan/rtl/tsan_report.h @@ -20,6 +20,7 @@ namespace __tsan { enum ReportType { ReportTypeRace, + ReportTypeVptrRace, ReportTypeUseAfterFree, ReportTypeThreadLeak, ReportTypeMutexDestroyLocked, @@ -48,6 +49,7 @@ struct ReportMop { uptr addr; int size; bool write; + bool atomic; Vector<ReportMopMutex> mset; ReportStack *stack; @@ -100,6 +102,7 @@ class ReportDesc { Vector<ReportMutex*> mutexes; Vector<ReportThread*> threads; ReportStack *sleep; + int count; ReportDesc(); ~ReportDesc(); diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc index 493ed2055dfc..5924858c84c5 100644 --- a/lib/tsan/rtl/tsan_rtl.cc +++ b/lib/tsan/rtl/tsan_rtl.cc @@ -23,6 +23,7 @@ #include "tsan_rtl.h" #include "tsan_mman.h" #include "tsan_suppressions.h" +#include "tsan_symbolize.h" volatile int __tsan_resumed = 0; @@ -37,17 +38,40 @@ THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64); #endif static char ctx_placeholder[sizeof(Context)] ALIGNED(64); +// Can be overriden by a front-end. 
+bool CPP_WEAK OnFinalize(bool failed) { + return failed; +} + static Context *ctx; Context *CTX() { return ctx; } +static char thread_registry_placeholder[sizeof(ThreadRegistry)]; + +static ThreadContextBase *CreateThreadContext(u32 tid) { + // Map thread trace when context is created. + MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event)); + MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace)); + new(ThreadTrace(tid)) Trace(); + void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); + return new(mem) ThreadContext(tid); +} + +#ifndef TSAN_GO +static const u32 kThreadQuarantineSize = 16; +#else +static const u32 kThreadQuarantineSize = 64; +#endif + Context::Context() : initialized() , report_mtx(MutexTypeReport, StatMtxReport) , nreported() , nmissed_expected() - , thread_mtx(MutexTypeThreads, StatMtxThreads) + , thread_registry(new(thread_registry_placeholder) ThreadRegistry( + CreateThreadContext, kMaxTid, kThreadQuarantineSize)) , racy_stacks(MBlockRacyStacks) , racy_addresses(MBlockRacyAddresses) , fired_suppressions(MBlockRacyAddresses) { @@ -60,10 +84,12 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, : fast_state(tid, epoch) // Do not touch these, rely on zero initialization, // they may be accessed before the ctor. - // , fast_ignore_reads() - // , fast_ignore_writes() + // , ignore_reads_and_writes() // , in_rtl() , shadow_stack_pos(&shadow_stack[0]) +#ifndef TSAN_GO + , jmp_bufs(MBlockJmpBuf) +#endif , tid(tid) , unique_id(unique_id) , stk_addr(stk_addr) @@ -72,94 +98,74 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, , tls_size(tls_size) { } -ThreadContext::ThreadContext(int tid) - : tid(tid) - , unique_id() - , os_id() - , user_id() - , thr() - , status(ThreadStatusInvalid) - , detached() - , reuse_count() - , epoch0() - , epoch1() - , dead_info() - , dead_next() - , name() { -} - -static void WriteMemoryProfile(char *buf, uptr buf_size, int num) { - uptr shadow = GetShadowMemoryConsumption(); - - int nthread = 0; - int nlivethread = 0; - uptr threadmem = 0; - { - Lock l(&ctx->thread_mtx); - for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = ctx->threads[i]; - if (tctx == 0) - continue; - nthread += 1; - threadmem += sizeof(ThreadContext); - if (tctx->status != ThreadStatusRunning) - continue; - nlivethread += 1; - threadmem += sizeof(ThreadState); - } - } - - uptr nsync = 0; - uptr syncmem = CTX()->synctab.GetMemoryConsumption(&nsync); - - internal_snprintf(buf, buf_size, "%d: shadow=%zuMB" - " thread=%zuMB(total=%d/live=%d)" - " sync=%zuMB(cnt=%zu)\n", - num, - shadow >> 20, - threadmem >> 20, nthread, nlivethread, - syncmem >> 20, nsync); +static void MemoryProfiler(Context *ctx, fd_t fd, int i) { + uptr n_threads; + uptr n_running_threads; + ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); + InternalScopedBuffer<char> buf(4096); + internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n", + i, n_threads, n_running_threads); + internal_write(fd, buf.data(), internal_strlen(buf.data())); + WriteMemoryProfile(buf.data(), buf.size()); + internal_write(fd, buf.data(), internal_strlen(buf.data())); } -static void MemoryProfileThread(void *arg) { +static void BackgroundThread(void *arg) { ScopedInRtl in_rtl; - fd_t fd = (fd_t)(uptr)arg; + Context *ctx = CTX(); + const u64 kMs2Ns = 1000 * 1000; + + fd_t mprof_fd = kInvalidFd; + if (flags()->profile_memory && flags()->profile_memory[0]) { + InternalScopedBuffer<char> filename(4096); + 
internal_snprintf(filename.data(), filename.size(), "%s.%d", + flags()->profile_memory, (int)internal_getpid()); + uptr openrv = OpenFile(filename.data(), true); + if (internal_iserror(openrv)) { + Printf("ThreadSanitizer: failed to open memory profile file '%s'\n", + &filename[0]); + } else { + mprof_fd = openrv; + } + } + + u64 last_flush = NanoTime(); for (int i = 0; ; i++) { - InternalScopedBuffer<char> buf(4096); - WriteMemoryProfile(buf.data(), buf.size(), i); - internal_write(fd, buf.data(), internal_strlen(buf.data())); SleepForSeconds(1); - } -} + u64 now = NanoTime(); + + // Flush memory if requested. + if (flags()->flush_memory_ms) { + if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) { + FlushShadowMemory(); + last_flush = NanoTime(); + } + } -static void InitializeMemoryProfile() { - if (flags()->profile_memory == 0 || flags()->profile_memory[0] == 0) - return; - InternalScopedBuffer<char> filename(4096); - internal_snprintf(filename.data(), filename.size(), "%s.%d", - flags()->profile_memory, GetPid()); - fd_t fd = internal_open(filename.data(), true); - if (fd == kInvalidFd) { - Printf("Failed to open memory profile file '%s'\n", &filename[0]); - Die(); - } - internal_start_thread(&MemoryProfileThread, (void*)(uptr)fd); -} + // Write memory profile if requested. + if (mprof_fd != kInvalidFd) + MemoryProfiler(ctx, mprof_fd, i); -static void MemoryFlushThread(void *arg) { - ScopedInRtl in_rtl; - for (int i = 0; ; i++) { - SleepForMillis(flags()->flush_memory_ms); - FlushShadowMemory(); +#ifndef TSAN_GO + // Flush symbolizer cache if requested. + if (flags()->flush_symbolizer_ms > 0) { + u64 last = atomic_load(&ctx->last_symbolize_time_ns, + memory_order_relaxed); + if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) { + Lock l(&ctx->report_mtx); + SpinMutexLock l2(&CommonSanitizerReportMutex); + SymbolizeFlush(); + atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed); + } + } +#endif } } -static void InitializeMemoryFlush() { - if (flags()->flush_memory_ms == 0) - return; - if (flags()->flush_memory_ms < 100) - flags()->flush_memory_ms = 100; - internal_start_thread(&MemoryFlushThread, 0); +void DontNeedShadowFor(uptr addr, uptr size) { + uptr shadow_beg = MemToShadow(addr); + uptr shadow_end = MemToShadow(addr + size); + FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); } void MapShadow(uptr addr, uptr size) { @@ -182,6 +188,7 @@ void Initialize(ThreadState *thr) { if (is_initialized) return; is_initialized = true; + SanitizerToolName = "ThreadSanitizer"; // Install tool-specific callbacks in sanitizer_common. SetCheckFailedCallback(TsanCheckFailed); @@ -197,9 +204,6 @@ void Initialize(ThreadState *thr) { #ifndef TSAN_GO InitializeShadowMemory(); #endif - ctx->dead_list_size = 0; - ctx->dead_list_head = 0; - ctx->dead_list_tail = 0; InitializeFlags(&ctx->flags, env); // Setup correct file descriptor for error reports. if (internal_strcmp(flags()->log_path, "stdout") == 0) @@ -220,26 +224,24 @@ void Initialize(ThreadState *thr) { } } #endif - InitializeMemoryProfile(); - InitializeMemoryFlush(); + internal_start_thread(&BackgroundThread, 0); if (ctx->flags.verbosity) Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n", - GetPid()); + (int)internal_getpid()); // Initialize thread 0. 
- ctx->thread_seq = 0; int tid = ThreadCreate(thr, 0, 0, true); CHECK_EQ(tid, 0); - ThreadStart(thr, tid, GetPid()); + ThreadStart(thr, tid, internal_getpid()); CHECK_EQ(thr->in_rtl, 1); ctx->initialized = true; if (flags()->stop_on_start) { Printf("ThreadSanitizer is suspended at startup (pid %d)." " Call __tsan_resume().\n", - GetPid()); - while (__tsan_resumed == 0); + (int)internal_getpid()); + while (__tsan_resumed == 0) {} } } @@ -253,8 +255,15 @@ int Finalize(ThreadState *thr) { // Wait for pending reports. ctx->report_mtx.Lock(); + CommonSanitizerReportMutex.Lock(); + CommonSanitizerReportMutex.Unlock(); ctx->report_mtx.Unlock(); +#ifndef TSAN_GO + if (ctx->flags.verbosity) + AllocatorPrintStats(); +#endif + ThreadFinalize(thr); if (ctx->nreported) { @@ -272,6 +281,15 @@ int Finalize(ThreadState *thr) { ctx->nmissed_expected); } + if (flags()->print_suppressions) + PrintMatchedSuppressions(); +#ifndef TSAN_GO + if (flags()->print_benign) + PrintMatchedBenignRaces(); +#endif + + failed = OnFinalize(failed); + StatAggregate(ctx->stat, thr->stat); StatOutput(ctx->stat); return failed ? flags()->exitcode : 0; @@ -296,15 +314,20 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) { void TraceSwitch(ThreadState *thr) { thr->nomalloc++; ScopedInRtl in_rtl; - Lock l(&thr->trace.mtx); + Trace *thr_trace = ThreadTrace(thr->tid); + Lock l(&thr_trace->mtx); unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); - TraceHeader *hdr = &thr->trace.headers[trace]; + TraceHeader *hdr = &thr_trace->headers[trace]; hdr->epoch0 = thr->fast_state.epoch(); hdr->stack0.ObtainCurrent(thr, 0); hdr->mset0 = thr->mset; thr->nomalloc--; } +Trace *ThreadTrace(int tid) { + return (Trace*)GetThreadTraceHeader(tid); +} + uptr TraceTopPC(ThreadState *thr) { Event *events = (Event*)GetThreadTrace(thr->tid); uptr pc = events[thr->fast_state.GetTracePos()]; @@ -330,18 +353,18 @@ extern "C" void __tsan_report_race() { #endif ALWAYS_INLINE -static Shadow LoadShadow(u64 *p) { +Shadow LoadShadow(u64 *p) { u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed); return Shadow(raw); } ALWAYS_INLINE -static void StoreShadow(u64 *sp, u64 s) { +void StoreShadow(u64 *sp, u64 s) { atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed); } ALWAYS_INLINE -static void StoreIfNotYetStored(u64 *sp, u64 *s) { +void StoreIfNotYetStored(u64 *sp, u64 *s) { StoreShadow(sp, *s); *s = 0; } @@ -358,18 +381,6 @@ static inline void HandleRace(ThreadState *thr, u64 *shadow_mem, #endif } -static inline bool BothReads(Shadow s, int kAccessIsWrite) { - return !kAccessIsWrite && !s.is_write(); -} - -static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) { - return old.is_write() || !kAccessIsWrite; -} - -static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) { - return !old.is_write() || kAccessIsWrite; -} - static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) { return old.epoch() >= thr->fast_synch_epoch; } @@ -378,9 +389,9 @@ static inline bool HappensBefore(Shadow old, ThreadState *thr) { return thr->clock.get(old.TidWithIgnore()) >= old.epoch(); } -ALWAYS_INLINE +ALWAYS_INLINE USED void MemoryAccessImpl(ThreadState *thr, uptr addr, - int kAccessSizeLog, bool kAccessIsWrite, + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur) { StatInc(thr, StatMop); StatInc(thr, kAccessIsWrite ? 
StatMopWrite : StatMopRead); @@ -452,9 +463,30 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr, return; } -ALWAYS_INLINE +void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, + int size, bool kAccessIsWrite, bool kIsAtomic) { + while (size) { + int size1 = 1; + int kAccessSizeLog = kSizeLog1; + if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) { + size1 = 8; + kAccessSizeLog = kSizeLog8; + } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) { + size1 = 4; + kAccessSizeLog = kSizeLog4; + } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) { + size1 = 2; + kAccessSizeLog = kSizeLog2; + } + MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic); + addr += size1; + size -= size1; + } +} + +ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, - int kAccessSizeLog, bool kAccessIsWrite) { + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) { u64 *shadow_mem = (u64*)MemToShadow(addr); DPrintf2("#%d: MemoryAccess: @%p %p size=%d" " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n", @@ -473,6 +505,16 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, } #endif + if (*shadow_mem == kShadowRodata) { + // Access to .rodata section, no races here. + // Measurements show that it can be 10-20% of all memory accesses. + StatInc(thr, StatMop); + StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead); + StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog)); + StatInc(thr, StatMopRodata); + return; + } + FastState fast_state = thr->fast_state; if (fast_state.GetIgnoreBit()) return; @@ -481,17 +523,20 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, Shadow cur(fast_state); cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog); cur.SetWrite(kAccessIsWrite); + cur.SetAtomic(kIsAtomic); // We must not store to the trace if we do not store to the shadow. // That is, this call must be moved somewhere below. TraceAddEvent(thr, fast_state, EventTypeMop, pc); - MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, + MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic, shadow_mem, cur); } static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, u64 val) { + (void)thr; + (void)pc; if (size == 0) return; // FIXME: fix me. @@ -508,23 +553,42 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, // let it just crash as usual. if (!IsAppMem(addr) || !IsAppMem(addr + size - 1)) return; - (void)thr; - (void)pc; - // Some programs mmap like hundreds of GBs but actually used a small part. - // So, it's better to report a false positive on the memory - // then to hang here senselessly. - const uptr kMaxResetSize = 4ull*1024*1024*1024; - if (size > kMaxResetSize) - size = kMaxResetSize; + // Don't want to touch lots of shadow memory. + // If a program maps 10MB stack, there is no need reset the whole range. 
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1); - u64 *p = (u64*)MemToShadow(addr); - CHECK(IsShadowMem((uptr)p)); - CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1))); - // FIXME: may overwrite a part outside the region - for (uptr i = 0; i < size * kShadowCnt / kShadowCell;) { - p[i++] = val; - for (uptr j = 1; j < kShadowCnt; j++) - p[i++] = 0; + if (size < 64*1024) { + u64 *p = (u64*)MemToShadow(addr); + CHECK(IsShadowMem((uptr)p)); + CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1))); + // FIXME: may overwrite a part outside the region + for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) { + p[i++] = val; + for (uptr j = 1; j < kShadowCnt; j++) + p[i++] = 0; + } + } else { + // The region is big, reset only beginning and end. + const uptr kPageSize = 4096; + u64 *begin = (u64*)MemToShadow(addr); + u64 *end = begin + size / kShadowCell * kShadowCnt; + u64 *p = begin; + // Set at least first kPageSize/2 to page boundary. + while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) { + *p++ = val; + for (uptr j = 1; j < kShadowCnt; j++) + *p++ = 0; + } + // Reset middle part. + u64 *p1 = p; + p = RoundDown(end, kPageSize); + UnmapOrDie((void*)p1, (uptr)p - (uptr)p1); + MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1); + // Set the ending. + while (p < end) { + *p++ = val; + for (uptr j = 1; j < kShadowCnt; j++) + *p++ = 0; + } } } @@ -533,7 +597,17 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) { } void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) { + // Processing more than 1k (4k of shadow) is expensive, + // can cause excessive memory consumption (user does not necessary touch + // the whole range) and most likely unnecessary. + if (size > 1024) + size = 1024; + CHECK_EQ(thr->is_freeing, false); + thr->is_freeing = true; MemoryAccessRange(thr, pc, addr, size, true); + thr->is_freeing = false; + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); Shadow s(thr->fast_state); s.ClearIgnoreBit(); s.MarkAsFreed(); @@ -543,6 +617,8 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) { } void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) { + thr->fast_state.IncrementEpoch(); + TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc); Shadow s(thr->fast_state); s.ClearIgnoreBit(); s.SetWrite(true); @@ -550,7 +626,7 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) { MemoryRangeSet(thr, pc, addr, size, s.raw()); } -ALWAYS_INLINE +ALWAYS_INLINE USED void FuncEntry(ThreadState *thr, uptr pc) { DCHECK_EQ(thr->in_rtl, 0); StatInc(thr, StatFuncEnter); @@ -580,7 +656,7 @@ void FuncEntry(ThreadState *thr, uptr pc) { thr->shadow_stack_pos++; } -ALWAYS_INLINE +ALWAYS_INLINE USED void FuncExit(ThreadState *thr) { DCHECK_EQ(thr->in_rtl, 0); StatInc(thr, StatFuncExit); diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h index 6b0ab0d385ef..f1a73e457331 100644 --- a/lib/tsan/rtl/tsan_rtl.h +++ b/lib/tsan/rtl/tsan_rtl.h @@ -26,8 +26,9 @@ #ifndef TSAN_RTL_H #define TSAN_RTL_H -#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_thread_registry.h" #include "tsan_clock.h" #include "tsan_defs.h" #include "tsan_flags.h" @@ -46,15 +47,73 @@ namespace __tsan { // Descriptor of user's memory block. 
struct MBlock { - Mutex mtx; - uptr size; - u32 alloc_tid; - u32 alloc_stack_id; - SyncVar *head; + /* + u64 mtx : 1; // must be first + u64 lst : 44; + u64 stk : 31; // on word boundary + u64 tid : kTidBits; + u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39 + */ + u64 raw[2]; + + void Init(uptr siz, u32 tid, u32 stk) { + raw[0] = raw[1] = 0; + raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64); + raw[1] |= (u64)tid << ((1 + 44 + 31) % 64); + raw[0] |= (u64)stk << (1 + 44); + raw[1] |= (u64)stk >> (64 - 44 - 1); + DCHECK_EQ(Size(), siz); + DCHECK_EQ(Tid(), tid); + DCHECK_EQ(StackId(), stk); + } + + u32 Tid() const { + return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits); + } + + uptr Size() const { + return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64); + } + + u32 StackId() const { + return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31); + } + + SyncVar *ListHead() const { + return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3); + } + + void ListPush(SyncVar *v) { + SyncVar *lst = ListHead(); + v->next = lst; + u64 x = (u64)v ^ (u64)lst; + x = (x >> 3) << 1; + raw[0] ^= x; + DCHECK_EQ(ListHead(), v); + } + + SyncVar *ListPop() { + SyncVar *lst = ListHead(); + SyncVar *nxt = lst->next; + lst->next = 0; + u64 x = (u64)lst ^ (u64)nxt; + x = (x >> 3) << 1; + raw[0] ^= x; + DCHECK_EQ(ListHead(), nxt); + return lst; + } - MBlock() - : mtx(MutexTypeMBlock, StatMtxMBlock) { + void ListReset() { + SyncVar *lst = ListHead(); + u64 x = (u64)lst; + x = (x >> 3) << 1; + raw[0] ^= x; + DCHECK_EQ(ListHead(), 0); } + + void Lock(); + void Unlock(); + typedef GenericScopedLock<MBlock> ScopedLock; }; #ifndef TSAN_GO @@ -65,22 +124,11 @@ const uptr kAllocatorSpace = 0x7d0000000000ULL; #endif const uptr kAllocatorSize = 0x10000000000ULL; // 1T. -struct TsanMapUnmapCallback { - void OnMap(uptr p, uptr size) const { } - void OnUnmap(uptr p, uptr size) const { - // We are about to unmap a chunk of user memory. - // Mark the corresponding shadow memory as not needed. 
- uptr shadow_beg = MemToShadow(p); - uptr shadow_end = MemToShadow(p + size); - CHECK(IsAligned(shadow_end|shadow_beg, GetPageSizeCached())); - FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg); - } -}; - +struct MapUnmapCallback; typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock), - DefaultSizeClassMap> PrimaryAllocator; + DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator; typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; -typedef LargeMmapAllocator<TsanMapUnmapCallback> SecondaryAllocator; +typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator; typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> Allocator; Allocator *allocator(); @@ -89,6 +137,8 @@ Allocator *allocator(); void TsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); +const u64 kShadowRodata = (u64)-1; // .rodata shadow marker + // FastState (from most significant bit): // ignore : 1 // tid : kTidBits @@ -173,7 +223,8 @@ class FastState { // freed : 1 // tid : kTidBits // epoch : kClkBits -// is_write : 1 +// is_atomic : 1 +// is_read : 1 // size_log : 2 // addr0 : 3 class Shadow : public FastState { @@ -197,13 +248,26 @@ class Shadow : public FastState { } void SetWrite(unsigned kAccessIsWrite) { - DCHECK_EQ(x_ & 32, 0); - if (kAccessIsWrite) - x_ |= 32; - DCHECK_EQ(kAccessIsWrite, is_write()); + DCHECK_EQ(x_ & kReadBit, 0); + if (!kAccessIsWrite) + x_ |= kReadBit; + DCHECK_EQ(kAccessIsWrite, IsWrite()); + } + + void SetAtomic(bool kIsAtomic) { + DCHECK(!IsAtomic()); + if (kIsAtomic) + x_ |= kAtomicBit; + DCHECK_EQ(IsAtomic(), kIsAtomic); } - bool IsZero() const { return x_ == 0; } + bool IsAtomic() const { + return x_ & kAtomicBit; + } + + bool IsZero() const { + return x_ == 0; + } static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) { u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift; @@ -250,7 +314,8 @@ class Shadow : public FastState { } u64 addr0() const { return x_ & 7; } u64 size() const { return 1ull << size_log(); } - bool is_write() const { return x_ & 32; } + bool IsWrite() const { return !IsRead(); } + bool IsRead() const { return x_ & kReadBit; } // The idea behind the freed bit is as follows. 
// When the memory is freed (or otherwise unaccessible) we write to the shadow @@ -265,13 +330,46 @@ class Shadow : public FastState { x_ |= kFreedBit; } + bool IsFreed() const { + return x_ & kFreedBit; + } + bool GetFreedAndReset() { bool res = x_ & kFreedBit; x_ &= ~kFreedBit; return res; } + bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const { + // analyzes 5-th bit (is_read) and 6-th bit (is_atomic) + bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift) + | (kIsAtomic << kAtomicShift)); + DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic)); + return v; + } + + bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const { + bool v = ((x_ >> kReadShift) & 3) + <= u64((kIsWrite ^ 1) | (kIsAtomic << 1)); + DCHECK_EQ(v, (IsAtomic() < kIsAtomic) || + (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite)); + return v; + } + + bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const { + bool v = ((x_ >> kReadShift) & 3) + >= u64((kIsWrite ^ 1) | (kIsAtomic << 1)); + DCHECK_EQ(v, (IsAtomic() > kIsAtomic) || + (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite)); + return v; + } + private: + static const u64 kReadShift = 5; + static const u64 kReadBit = 1ull << kReadShift; + static const u64 kAtomicShift = 6; + static const u64 kAtomicBit = 1ull << kAtomicShift; + u64 size_log() const { return (x_ >> 3) & 3; } static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) { @@ -286,6 +384,12 @@ class Shadow : public FastState { struct SignalContext; +struct JmpBuf { + uptr sp; + uptr mangled_sp; + uptr *shadow_stack_pos; +}; + // This struct is stored in TLS. struct ThreadState { FastState fast_state; @@ -308,7 +412,6 @@ struct ThreadState { uptr *shadow_stack_pos; u64 *racy_shadow_addr; u64 racy_state[2]; - Trace trace; #ifndef TSAN_GO // C/C++ uses embed shadow stack of fixed size. uptr shadow_stack[kShadowStackSize]; @@ -321,12 +424,16 @@ struct ThreadState { ThreadClock clock; #ifndef TSAN_GO AllocatorCache alloc_cache; + Vector<JmpBuf> jmp_bufs; #endif u64 stat[StatCnt]; const int tid; const int unique_id; int in_rtl; + bool in_symbolizer; bool is_alive; + bool is_freeing; + bool is_vptr_access; const uptr stk_addr; const uptr stk_size; const uptr tls_addr; @@ -360,41 +467,30 @@ INLINE ThreadState *cur_thread() { } #endif -enum ThreadStatus { - ThreadStatusInvalid, // Non-existent thread, data is invalid. - ThreadStatusCreated, // Created but not yet running. - ThreadStatusRunning, // The thread is currently running. - ThreadStatusFinished, // Joinable thread is finished but not yet joined. - ThreadStatusDead // Joined, but some info (trace) is still alive. -}; - -// An info about a thread that is hold for some time after its termination. -struct ThreadDeadInfo { - Trace trace; -}; - -struct ThreadContext { - const int tid; - int unique_id; // Non-rolling thread id. - uptr os_id; // pid - uptr user_id; // Some opaque user thread id (e.g. pthread_t). +class ThreadContext : public ThreadContextBase { + public: + explicit ThreadContext(int tid); + ~ThreadContext(); ThreadState *thr; - ThreadStatus status; - bool detached; - int reuse_count; +#ifdef TSAN_GO + StackTrace creation_stack; +#else + u32 creation_stack_id; +#endif SyncClock sync; // Epoch at which the thread had started. // If we see an event from the thread stamped by an older epoch, // the event is from a dead thread that shared tid with this thread. u64 epoch0; u64 epoch1; - StackTrace creation_stack; - int creation_tid; - ThreadDeadInfo *dead_info; - ThreadContext *dead_next; // In dead thread list. 
- char *name; // As annotated by user. - explicit ThreadContext(int tid); + // Override superclass callbacks. + void OnDead(); + void OnJoined(void *arg); + void OnFinished(); + void OnStarted(void *arg); + void OnCreated(void *arg); + void OnReset(); }; struct RacyStacks { @@ -416,6 +512,7 @@ struct RacyAddress { struct FiredSuppression { ReportType type; uptr pc; + Suppression *supp; }; struct Context { @@ -428,16 +525,9 @@ struct Context { Mutex report_mtx; int nreported; int nmissed_expected; + atomic_uint64_t last_symbolize_time_ns; - Mutex thread_mtx; - unsigned thread_seq; - unsigned unique_thread_seq; - int alive_threads; - int max_alive_threads; - ThreadContext *threads[kMaxTid]; - int dead_list_size; - ThreadContext* dead_list_head; - ThreadContext* dead_list_tail; + ThreadRegistry *thread_registry; Vector<RacyStacks> racy_stacks; Vector<RacyAddress> racy_addresses; @@ -472,6 +562,7 @@ class ScopedReport { void AddMutex(const SyncVar *s); void AddLocation(uptr addr, uptr size); void AddSleep(u32 stack_id); + void SetCount(int count); const ReportDesc *GetReport() const; @@ -489,13 +580,18 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset); void StatAggregate(u64 *dst, u64 *src); void StatOutput(u64 *stat); -void ALWAYS_INLINE INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) { +void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) { if (kCollectStats) thr->stat[typ] += n; } +void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) { + if (kCollectStats) + thr->stat[typ] = n; +} void MapShadow(uptr addr, uptr size); void MapThreadTrace(uptr addr, uptr size); +void DontNeedShadowFor(uptr addr, uptr size); void InitializeShadowMemory(); void InitializeInterceptors(); void InitializeDynamicAnnotations(); @@ -503,11 +599,15 @@ void InitializeDynamicAnnotations(); void ReportRace(ThreadState *thr); bool OutputReport(Context *ctx, const ScopedReport &srep, - const ReportStack *suppress_stack = 0); + const ReportStack *suppress_stack1 = 0, + const ReportStack *suppress_stack2 = 0); bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, const StackTrace &trace); bool IsExpectedReport(uptr addr, uptr size); +void PrintMatchedBenignRaces(); +bool FrameIsInternal(const ReportStack *frame); +ReportStack *SkipTsanInternalFrames(ReportStack *ent); #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1 # define DPrintf Printf @@ -523,6 +623,7 @@ bool IsExpectedReport(uptr addr, uptr size); u32 CurrentStackId(ThreadState *thr, uptr pc); void PrintCurrentStack(ThreadState *thr, uptr pc); +void PrintCurrentStackSlow(); // uses libunwind void Initialize(ThreadState *thr); int Finalize(ThreadState *thr); @@ -532,16 +633,42 @@ SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr, SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr); void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, - int kAccessSizeLog, bool kAccessIsWrite); + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic); void MemoryAccessImpl(ThreadState *thr, uptr addr, - int kAccessSizeLog, bool kAccessIsWrite, + int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic, u64 *shadow_mem, Shadow cur); -void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr); -void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr); -void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr); -void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr); void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, - uptr size, bool is_write); + uptr 
size, bool is_write); +void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr, + uptr size, uptr step, bool is_write); +void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, + int size, bool kAccessIsWrite, bool kIsAtomic); + +const int kSizeLog1 = 0; +const int kSizeLog2 = 1; +const int kSizeLog4 = 2; +const int kSizeLog8 = 3; + +void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false); +} + +void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false); +} + +void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true); +} + +void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc, + uptr addr, int kAccessSizeLog) { + MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true); +} + void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size); void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size); @@ -564,8 +691,8 @@ void ProcessPendingSignals(ThreadState *thr); void MutexCreate(ThreadState *thr, uptr pc, uptr addr, bool rw, bool recursive, bool linker_init); void MutexDestroy(ThreadState *thr, uptr pc, uptr addr); -void MutexLock(ThreadState *thr, uptr pc, uptr addr); -void MutexUnlock(ThreadState *thr, uptr pc, uptr addr); +void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1); +int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false); void MutexReadLock(ThreadState *thr, uptr pc, uptr addr); void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr); @@ -601,9 +728,10 @@ void TraceSwitch(ThreadState *thr); uptr TraceTopPC(ThreadState *thr); uptr TraceSize(); uptr TraceParts(); +Trace *ThreadTrace(int tid); extern "C" void __tsan_trace_switch(); -void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs, +void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs, EventType typ, u64 addr) { DCHECK_GE((int)typ, 0); DCHECK_LE((int)typ, 7); diff --git a/lib/tsan/rtl/tsan_rtl_amd64.S b/lib/tsan/rtl/tsan_rtl_amd64.S index af878563573e..11c75c72dbe5 100644 --- a/lib/tsan/rtl/tsan_rtl_amd64.S +++ b/lib/tsan/rtl/tsan_rtl_amd64.S @@ -160,6 +160,143 @@ __tsan_report_race_thunk: ret .cfi_endproc +.hidden __tsan_setjmp +.comm _ZN14__interception11real_setjmpE,8,8 +.globl setjmp +.type setjmp, @function +setjmp: + .cfi_startproc + // save env parameter + push %rdi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdi, 0 + // obtain %rsp + lea 16(%rsp), %rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi + // call tsan interceptor + call __tsan_setjmp + // restore env parameter + pop %rdi + .cfi_adjust_cfa_offset -8 + .cfi_restore %rdi + // tail jump to libc setjmp + movl $0, %eax + movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + .cfi_endproc +.size setjmp, .-setjmp + +.comm _ZN14__interception12real__setjmpE,8,8 +.globl _setjmp +.type _setjmp, @function +_setjmp: + .cfi_startproc + // save env parameter + push %rdi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdi, 0 + // obtain %rsp + lea 16(%rsp), %rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi + // call 
tsan interceptor + call __tsan_setjmp + // restore env parameter + pop %rdi + .cfi_adjust_cfa_offset -8 + .cfi_restore %rdi + // tail jump to libc setjmp + movl $0, %eax + movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + .cfi_endproc +.size _setjmp, .-_setjmp + +.comm _ZN14__interception14real_sigsetjmpE,8,8 +.globl sigsetjmp +.type sigsetjmp, @function +sigsetjmp: + .cfi_startproc + // save env parameter + push %rdi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdi, 0 + // save savesigs parameter + push %rsi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rsi, 0 + // align stack frame + sub $8, %rsp + .cfi_adjust_cfa_offset 8 + // obtain %rsp + lea 32(%rsp), %rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi + // call tsan interceptor + call __tsan_setjmp + // unalign stack frame + add $8, %rsp + .cfi_adjust_cfa_offset -8 + // restore savesigs parameter + pop %rsi + .cfi_adjust_cfa_offset -8 + .cfi_restore %rsi + // restore env parameter + pop %rdi + .cfi_adjust_cfa_offset -8 + .cfi_restore %rdi + // tail jump to libc sigsetjmp + movl $0, %eax + movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + .cfi_endproc +.size sigsetjmp, .-sigsetjmp + +.comm _ZN14__interception16real___sigsetjmpE,8,8 +.globl __sigsetjmp +.type __sigsetjmp, @function +__sigsetjmp: + .cfi_startproc + // save env parameter + push %rdi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rdi, 0 + // save savesigs parameter + push %rsi + .cfi_adjust_cfa_offset 8 + .cfi_rel_offset %rsi, 0 + // align stack frame + sub $8, %rsp + .cfi_adjust_cfa_offset 8 + // obtain %rsp + lea 32(%rsp), %rdi + mov %rdi, %rsi + xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp) + rol $0x11, %rsi + // call tsan interceptor + call __tsan_setjmp + // unalign stack frame + add $8, %rsp + .cfi_adjust_cfa_offset -8 + // restore savesigs parameter + pop %rsi + .cfi_adjust_cfa_offset -8 + .cfi_restore %rsi + // restore env parameter + pop %rdi + .cfi_adjust_cfa_offset -8 + .cfi_restore %rdi + // tail jump to libc sigsetjmp + movl $0, %eax + movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx + jmp *(%rdx) + .cfi_endproc +.size __sigsetjmp, .-__sigsetjmp + #ifdef __linux__ /* We do not need executable stack. 
*/ .section .note.GNU-stack,"",@progbits diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc index d812f12be560..cf2e44dd09ee 100644 --- a/lib/tsan/rtl/tsan_rtl_mutex.cc +++ b/lib/tsan/rtl/tsan_rtl_mutex.cc @@ -26,8 +26,12 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr); StatInc(thr, StatMutexCreate); - if (!linker_init && IsAppMem(addr)) - MemoryWrite1Byte(thr, pc, addr); + if (!linker_init && IsAppMem(addr)) { + CHECK(!thr->is_freeing); + thr->is_freeing = true; + MemoryWrite(thr, pc, addr, kSizeLog1); + thr->is_freeing = false; + } SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true); s->is_rw = rw; s->is_recursive = recursive; @@ -49,13 +53,17 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr); if (s == 0) return; - if (IsAppMem(addr)) - MemoryWrite1Byte(thr, pc, addr); + if (IsAppMem(addr)) { + CHECK(!thr->is_freeing); + thr->is_freeing = true; + MemoryWrite(thr, pc, addr, kSizeLog1); + thr->is_freeing = false; + } if (flags()->report_destroy_locked && s->owner_tid != SyncVar::kInvalidTid && !s->is_broken) { s->is_broken = true; - Lock l(&ctx->thread_mtx); + ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeMutexDestroyLocked); rep.AddMutex(s); StackTrace trace; @@ -71,11 +79,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { DestroyAndFree(s); } -void MutexLock(ThreadState *thr, uptr pc, uptr addr) { +void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) { CHECK_GT(thr->in_rtl, 0); - DPrintf("#%d: MutexLock %zx\n", thr->tid, addr); + DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec); + CHECK_GT(rec, 0); if (IsAppMem(addr)) - MemoryRead1Byte(thr, pc, addr); + MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId()); @@ -99,19 +108,20 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) { } else if (!s->is_recursive) { StatInc(thr, StatMutexRecLock); } - s->recursion++; + s->recursion += rec; thr->mset.Add(s->GetId(), true, thr->fast_state.epoch()); s->mtx.Unlock(); } -void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) { +int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) { CHECK_GT(thr->in_rtl, 0); - DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr); + DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all); if (IsAppMem(addr)) - MemoryRead1Byte(thr, pc, addr); + MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); + int rec = 0; if (s->recursion == 0) { if (!s->is_broken) { s->is_broken = true; @@ -125,7 +135,8 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) { PrintCurrentStack(thr, pc); } } else { - s->recursion--; + rec = all ? 
s->recursion : 1; + s->recursion -= rec; if (s->recursion == 0) { StatInc(thr, StatMutexUnlock); s->owner_tid = SyncVar::kInvalidTid; @@ -139,6 +150,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) { } thr->mset.Del(s->GetId(), true); s->mtx.Unlock(); + return rec; } void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) { @@ -146,7 +158,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr); StatInc(thr, StatMutexReadLock); if (IsAppMem(addr)) - MemoryRead1Byte(thr, pc, addr); + MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId()); @@ -167,7 +179,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr); StatInc(thr, StatMutexReadUnlock); if (IsAppMem(addr)) - MemoryRead1Byte(thr, pc, addr); + MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId()); @@ -188,7 +200,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { CHECK_GT(thr->in_rtl, 0); DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); if (IsAppMem(addr)) - MemoryRead1Byte(thr, pc, addr); + MemoryReadAtomic(thr, pc, addr, kSizeLog1); SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true); bool write = true; if (s->owner_tid == SyncVar::kInvalidTid) { @@ -240,18 +252,19 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr) { s->mtx.ReadUnlock(); } +static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) { + ThreadState *thr = reinterpret_cast<ThreadState*>(arg); + ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); + if (tctx->status == ThreadStatusRunning) + thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch()); + else + thr->clock.set(tctx->tid, tctx->epoch1); +} + void AcquireGlobal(ThreadState *thr, uptr pc) { - Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = ctx->threads[i]; - if (tctx == 0) - continue; - if (tctx->status == ThreadStatusRunning) - thr->clock.set(i, tctx->thr->fast_state.epoch()); - else - thr->clock.set(i, tctx->epoch1); - } + ThreadRegistryLock l(CTX()->thread_registry); + CTX()->thread_registry->RunCallbackForEachThreadLocked( + UpdateClockCallback, thr); } void Release(ThreadState *thr, uptr pc, uptr addr) { @@ -275,19 +288,20 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { } #ifndef TSAN_GO +static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) { + ThreadState *thr = reinterpret_cast<ThreadState*>(arg); + ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); + if (tctx->status == ThreadStatusRunning) + thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch()); + else + thr->last_sleep_clock.set(tctx->tid, tctx->epoch1); +} + void AfterSleep(ThreadState *thr, uptr pc) { - Context *ctx = CTX(); thr->last_sleep_stack_id = CurrentStackId(thr, pc); - Lock l(&ctx->thread_mtx); - for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = ctx->threads[i]; - if (tctx == 0) - continue; - if (tctx->status == ThreadStatusRunning) - thr->last_sleep_clock.set(i, tctx->thr->fast_state.epoch()); - else - thr->last_sleep_clock.set(i, tctx->epoch1); - } + ThreadRegistryLock 
l(CTX()->thread_registry); + CTX()->thread_registry->RunCallbackForEachThreadLocked( + UpdateSleepClockCallback, thr); } #endif diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc index 1a780e4b8070..f77a7a2efa96 100644 --- a/lib/tsan/rtl/tsan_rtl_report.cc +++ b/lib/tsan/rtl/tsan_rtl_report.cc @@ -15,6 +15,7 @@ #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_stacktrace.h" #include "tsan_platform.h" #include "tsan_rtl.h" #include "tsan_suppressions.h" @@ -29,12 +30,15 @@ namespace __tsan { using namespace __sanitizer; // NOLINT +static ReportStack *SymbolizeStack(const StackTrace& trace); + void TsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) { ScopedInRtl in_rtl; Printf("FATAL: ThreadSanitizer CHECK failed: " "%s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line, cond, (uptr)v1, (uptr)v2); + PrintCurrentStackSlow(); Die(); } @@ -121,14 +125,16 @@ static ReportStack *SymbolizeStack(const StackTrace& trace) { ScopedReport::ScopedReport(ReportType typ) { ctx_ = CTX(); - ctx_->thread_mtx.CheckLocked(); + ctx_->thread_registry->CheckLocked(); void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc)); rep_ = new(mem) ReportDesc; rep_->typ = typ; ctx_->report_mtx.Lock(); + CommonSanitizerReportMutex.Lock(); } ScopedReport::~ScopedReport() { + CommonSanitizerReportMutex.Unlock(); ctx_->report_mtx.Unlock(); DestroyAndFree(rep_); } @@ -146,7 +152,8 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, mop->tid = s.tid(); mop->addr = addr + s.addr0(); mop->size = s.size(); - mop->write = s.is_write(); + mop->write = s.IsWrite(); + mop->atomic = s.IsAtomic(); mop->stack = SymbolizeStack(*stack); for (uptr i = 0; i < mset->Size(); i++) { MutexSet::Desc d = mset->Get(i); @@ -172,7 +179,7 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, void ScopedReport::AddThread(const ThreadContext *tctx) { for (uptr i = 0; i < rep_->threads.Size(); i++) { - if (rep_->threads[i]->id == tctx->tid) + if ((u32)rep_->threads[i]->id == tctx->tid) return; } void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread)); @@ -182,42 +189,65 @@ void ScopedReport::AddThread(const ThreadContext *tctx) { rt->pid = tctx->os_id; rt->running = (tctx->status == ThreadStatusRunning); rt->name = tctx->name ? 
internal_strdup(tctx->name) : 0; - rt->parent_tid = tctx->creation_tid; + rt->parent_tid = tctx->parent_tid; + rt->stack = 0; +#ifdef TSAN_GO rt->stack = SymbolizeStack(tctx->creation_stack); +#else + uptr ssz = 0; + const uptr *stack = StackDepotGet(tctx->creation_stack_id, &ssz); + if (stack) { + StackTrace trace; + trace.Init(stack, ssz); + rt->stack = SymbolizeStack(trace); + } +#endif } #ifndef TSAN_GO -static ThreadContext *FindThread(int unique_id) { +static ThreadContext *FindThreadByUidLocked(int unique_id) { Context *ctx = CTX(); - ctx->thread_mtx.CheckLocked(); + ctx->thread_registry->CheckLocked(); for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = ctx->threads[i]; - if (tctx && tctx->unique_id == unique_id) { + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(i)); + if (tctx && tctx->unique_id == (u32)unique_id) { return tctx; } } return 0; } +static ThreadContext *FindThreadByTidLocked(int tid) { + Context *ctx = CTX(); + ctx->thread_registry->CheckLocked(); + return static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(tid)); +} + +static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) { + uptr addr = (uptr)arg; + ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); + if (tctx->status != ThreadStatusRunning) + return false; + ThreadState *thr = tctx->thr; + CHECK(thr); + return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) || + (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size)); +} + ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { Context *ctx = CTX(); - ctx->thread_mtx.CheckLocked(); - for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = ctx->threads[i]; - if (tctx == 0 || tctx->status != ThreadStatusRunning) - continue; - ThreadState *thr = tctx->thr; - CHECK(thr); - if (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) { - *is_stack = true; - return tctx; - } - if (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size) { - *is_stack = false; - return tctx; - } - } - return 0; + ctx->thread_registry->CheckLocked(); + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls, + (void*)addr)); + if (!tctx) + return 0; + ThreadState *thr = tctx->thr; + CHECK(thr); + *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size); + return tctx; } #endif @@ -231,7 +261,16 @@ void ScopedReport::AddMutex(const SyncVar *s) { rep_->mutexes.PushBack(rm); rm->id = s->uid; rm->destroyed = false; - rm->stack = SymbolizeStack(s->creation_stack); + rm->stack = 0; +#ifndef TSAN_GO + uptr ssz = 0; + const uptr *stack = StackDepotGet(s->creation_stack_id, &ssz); + if (stack) { + StackTrace trace; + trace.Init(stack, ssz); + rm->stack = SymbolizeStack(trace); + } +#endif } void ScopedReport::AddMutex(u64 id) { @@ -269,27 +308,28 @@ void ScopedReport::AddLocation(uptr addr, uptr size) { trace.Init(stack, ssz); loc->stack = SymbolizeStack(trace); } - ThreadContext *tctx = FindThread(creat_tid); + ThreadContext *tctx = FindThreadByUidLocked(creat_tid); if (tctx) AddThread(tctx); return; } - if (allocator()->PointerIsMine((void*)addr)) { - MBlock *b = user_mblock(0, (void*)addr); - ThreadContext *tctx = FindThread(b->alloc_tid); + MBlock *b = 0; + if (allocator()->PointerIsMine((void*)addr) + && (b = user_mblock(0, (void*)addr))) { + ThreadContext *tctx = FindThreadByTidLocked(b->Tid()); void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation)); 
ReportLocation *loc = new(mem) ReportLocation(); rep_->locs.PushBack(loc); loc->type = ReportLocationHeap; loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr); - loc->size = b->size; - loc->tid = tctx ? tctx->tid : b->alloc_tid; + loc->size = b->Size(); + loc->tid = tctx ? tctx->tid : b->Tid(); loc->name = 0; loc->file = 0; loc->line = 0; loc->stack = 0; uptr ssz = 0; - const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz); + const uptr *stack = StackDepotGet(b->StackId(), &ssz); if (stack) { StackTrace trace; trace.Init(stack, ssz); @@ -328,6 +368,10 @@ void ScopedReport::AddSleep(u32 stack_id) { } #endif +void ScopedReport::SetCount(int count) { + rep_->count = count; +} + const ReportDesc *ScopedReport::GetReport() const { return rep_; } @@ -336,21 +380,17 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) { // This function restores stack trace and mutex set for the thread/epoch. // It does so by getting stack trace and mutex set at the beginning of // trace part, and then replaying the trace till the given epoch. - ThreadContext *tctx = CTX()->threads[tid]; + Context *ctx = CTX(); + ctx->thread_registry->CheckLocked(); + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(tid)); if (tctx == 0) return; - Trace* trace = 0; - if (tctx->status == ThreadStatusRunning) { - CHECK(tctx->thr); - trace = &tctx->thr->trace; - } else if (tctx->status == ThreadStatusFinished - || tctx->status == ThreadStatusDead) { - if (tctx->dead_info == 0) - return; - trace = &tctx->dead_info->trace; - } else { + if (tctx->status != ThreadStatusRunning + && tctx->status != ThreadStatusFinished + && tctx->status != ThreadStatusDead) return; - } + Trace* trace = ThreadTrace(tctx->tid); Lock l(&trace->mtx); const int partidx = (epoch / kTracePartSize) % TraceParts(); TraceHeader* hdr = &trace->headers[partidx]; @@ -460,12 +500,17 @@ static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2], bool OutputReport(Context *ctx, const ScopedReport &srep, - const ReportStack *suppress_stack) { + const ReportStack *suppress_stack1, + const ReportStack *suppress_stack2) { + atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed); const ReportDesc *rep = srep.GetReport(); - const uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack); + Suppression *supp = 0; + uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp); + if (suppress_pc == 0) + suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp); if (suppress_pc != 0) { - FiredSuppression supp = {srep.GetReport()->typ, suppress_pc}; - ctx->fired_suppressions.PushBack(supp); + FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp}; + ctx->fired_suppressions.PushBack(s); } if (OnReport(rep, suppress_pc != 0)) return false; @@ -481,13 +526,24 @@ bool IsFiredSuppression(Context *ctx, if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) continue; for (uptr j = 0; j < trace.Size(); j++) { - if (trace.Get(j) == ctx->fired_suppressions[k].pc) + FiredSuppression *s = &ctx->fired_suppressions[k]; + if (trace.Get(j) == s->pc) { + if (s->supp) + s->supp->hit_count++; return true; + } } } return false; } +bool FrameIsInternal(const ReportStack *frame) { + return frame != 0 && frame->file != 0 + && (internal_strstr(frame->file, "tsan_interceptors.cc") || + internal_strstr(frame->file, "sanitizer_common_interceptors.inc") || + internal_strstr(frame->file, "tsan_interface_")); +} + // On programs that use Java we see weird reports like: // 
WARNING: ThreadSanitizer: data race (pid=22512) // Read of size 8 at 0x7d2b00084318 by thread 100: @@ -497,30 +553,42 @@ bool IsFiredSuppression(Context *ctx, // #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919) // #1 <null> <null>:0 (0x7f7ad9b42707) static bool IsJavaNonsense(const ReportDesc *rep) { +#ifndef TSAN_GO for (uptr i = 0; i < rep->mops.Size(); i++) { ReportMop *mop = rep->mops[i]; ReportStack *frame = mop->stack; - if (frame != 0 && frame->func != 0 - && (internal_strcmp(frame->func, "memset") == 0 - || internal_strcmp(frame->func, "memcpy") == 0 - || internal_strcmp(frame->func, "memmove") == 0 - || internal_strcmp(frame->func, "strcmp") == 0 - || internal_strcmp(frame->func, "strncpy") == 0 - || internal_strcmp(frame->func, "strlen") == 0 - || internal_strcmp(frame->func, "free") == 0 - || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) { + if (frame == 0 + || (frame->func == 0 && frame->file == 0 && frame->line == 0 + && frame->module == 0)) { + return true; + } + if (FrameIsInternal(frame)) { frame = frame->next; if (frame == 0 || (frame->func == 0 && frame->file == 0 && frame->line == 0 - && frame->module == 0)) { + && frame->module == 0)) { if (frame) { - FiredSuppression supp = {rep->typ, frame->pc}; + FiredSuppression supp = {rep->typ, frame->pc, 0}; CTX()->fired_suppressions.PushBack(supp); } return true; } } } +#endif + return false; +} + +static bool RaceBetweenAtomicAndFree(ThreadState *thr) { + Shadow s0(thr->racy_state[0]); + Shadow s1(thr->racy_state[1]); + CHECK(!(s0.IsAtomic() && s1.IsAtomic())); + if (!s0.IsAtomic() && !s1.IsAtomic()) + return true; + if (s0.IsAtomic() && s1.IsFreed()) + return true; + if (s1.IsAtomic() && thr->is_freeing) + return true; return false; } @@ -529,9 +597,8 @@ void ReportRace(ThreadState *thr) { return; ScopedInRtl in_rtl; - if (thr->in_signal_handler) - Printf("ThreadSanitizer: printing report from signal handler." - " Can crash or hang.\n"); + if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr)) + return; bool freed = false; { @@ -555,9 +622,14 @@ void ReportRace(ThreadState *thr) { } Context *ctx = CTX(); - Lock l0(&ctx->thread_mtx); - - ScopedReport rep(freed ? 
ReportTypeUseAfterFree : ReportTypeRace); + ThreadRegistryLock l0(ctx->thread_registry); + + ReportType typ = ReportTypeRace; + if (thr->is_vptr_access) + typ = ReportTypeVptrRace; + else if (freed) + typ = ReportTypeUseAfterFree; + ScopedReport rep(typ); const uptr kMop = 2; StackTrace traces[kMop]; const uptr toppc = TraceTopPC(thr); @@ -583,7 +655,8 @@ void ReportRace(ThreadState *thr) { for (uptr i = 0; i < kMop; i++) { FastState s(thr->racy_state[i]); - ThreadContext *tctx = ctx->threads[s.tid()]; + ThreadContext *tctx = static_cast<ThreadContext*>( + ctx->thread_registry->GetThreadLocked(s.tid())); if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1) continue; rep.AddThread(tctx); @@ -599,7 +672,8 @@ void ReportRace(ThreadState *thr) { } #endif - if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack)) + if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack, + rep.GetReport()->mops[1]->stack)) return; AddRacyStacks(thr, traces, addr_min, addr_max); @@ -611,4 +685,21 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) { PrintStack(SymbolizeStack(trace)); } +void PrintCurrentStackSlow() { +#ifndef TSAN_GO + __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace, + sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace; + ptrace->SlowUnwindStack(__sanitizer::StackTrace::GetCurrentPc(), + kStackTraceMax); + for (uptr i = 0; i < ptrace->size / 2; i++) { + uptr tmp = ptrace->trace[i]; + ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1]; + ptrace->trace[ptrace->size - i - 1] = tmp; + } + StackTrace trace; + trace.Init(ptrace->trace, ptrace->size); + PrintStack(SymbolizeStack(trace)); +#endif +} + } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc index 359775927834..ee13fa18db3f 100644 --- a/lib/tsan/rtl/tsan_rtl_thread.cc +++ b/lib/tsan/rtl/tsan_rtl_thread.cc @@ -20,144 +20,193 @@ namespace __tsan { +// ThreadContext implementation. + +ThreadContext::ThreadContext(int tid) + : ThreadContextBase(tid) + , thr() + , sync() + , epoch0() + , epoch1() { +} + #ifndef TSAN_GO -const int kThreadQuarantineSize = 16; -#else -const int kThreadQuarantineSize = 64; +ThreadContext::~ThreadContext() { +} #endif -static void MaybeReportThreadLeak(ThreadContext *tctx) { - if (tctx->detached) +void ThreadContext::OnDead() { + sync.Reset(); +} + +void ThreadContext::OnJoined(void *arg) { + ThreadState *caller_thr = static_cast<ThreadState *>(arg); + caller_thr->clock.acquire(&sync); + StatInc(caller_thr, StatSyncAcquire); + sync.Reset(); +} + +struct OnCreatedArgs { + ThreadState *thr; + uptr pc; +}; + +void ThreadContext::OnCreated(void *arg) { + thr = 0; + if (tid == 0) return; - if (tctx->status != ThreadStatusCreated - && tctx->status != ThreadStatusRunning - && tctx->status != ThreadStatusFinished) + OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg); + args->thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. 
+ TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0); + args->thr->clock.set(args->thr->tid, args->thr->fast_state.epoch()); + args->thr->fast_synch_epoch = args->thr->fast_state.epoch(); + args->thr->clock.release(&sync); + StatInc(args->thr, StatSyncRelease); +#ifdef TSAN_GO + creation_stack.ObtainCurrent(args->thr, args->pc); +#else + creation_stack_id = CurrentStackId(args->thr, args->pc); +#endif + if (reuse_count == 0) + StatInc(args->thr, StatThreadMaxTid); +} + +void ThreadContext::OnReset() { + sync.Reset(); + FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event)); + //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace)); +} + +struct OnStartedArgs { + ThreadState *thr; + uptr stk_addr; + uptr stk_size; + uptr tls_addr; + uptr tls_size; +}; + +void ThreadContext::OnStarted(void *arg) { + OnStartedArgs *args = static_cast<OnStartedArgs*>(arg); + thr = args->thr; + // RoundUp so that one trace part does not contain events + // from different threads. + epoch0 = RoundUp(epoch1 + 1, kTracePartSize); + epoch1 = (u64)-1; + new(thr) ThreadState(CTX(), tid, unique_id, + epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size); +#ifdef TSAN_GO + // Setup dynamic shadow stack. + const int kInitStackSize = 8; + args->thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack, + kInitStackSize * sizeof(uptr)); + args->thr->shadow_stack_pos = thr->shadow_stack; + args->thr->shadow_stack_end = thr->shadow_stack + kInitStackSize; +#endif +#ifndef TSAN_GO + AllocatorThreadStart(args->thr); +#endif + thr = args->thr; + thr->fast_synch_epoch = epoch0; + thr->clock.set(tid, epoch0); + thr->clock.acquire(&sync); + thr->fast_state.SetHistorySize(flags()->history_size); + const uptr trace = (epoch0 / kTracePartSize) % TraceParts(); + Trace *thr_trace = ThreadTrace(thr->tid); + thr_trace->headers[trace].epoch0 = epoch0; + StatInc(thr, StatSyncAcquire); + sync.Reset(); + DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " + "tls_addr=%zx tls_size=%zx\n", + tid, (uptr)epoch0, args->stk_addr, args->stk_size, + args->tls_addr, args->tls_size); + thr->is_alive = true; +} + +void ThreadContext::OnFinished() { + if (!detached) { + thr->fast_state.IncrementEpoch(); + // Can't increment epoch w/o writing to the trace as well. 
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + thr->clock.set(thr->tid, thr->fast_state.epoch()); + thr->fast_synch_epoch = thr->fast_state.epoch(); + thr->clock.release(&sync); + StatInc(thr, StatSyncRelease); + } + epoch1 = thr->fast_state.epoch(); + +#ifndef TSAN_GO + AllocatorThreadFinish(thr); +#endif + thr->~ThreadState(); + StatAggregate(CTX()->stat, thr->stat); + thr = 0; +} + +#ifndef TSAN_GO +struct ThreadLeak { + ThreadContext *tctx; + int count; +}; + +static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) { + Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg; + ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); + if (tctx->detached || tctx->status != ThreadStatusFinished) return; - ScopedReport rep(ReportTypeThreadLeak); - rep.AddThread(tctx); - OutputReport(CTX(), rep); + for (uptr i = 0; i < leaks.Size(); i++) { + if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) { + leaks[i].count++; + return; + } + } + ThreadLeak leak = {tctx, 1}; + leaks.PushBack(leak); +} +#endif + +static void ThreadCheckIgnore(ThreadState *thr) { + if (thr->ignore_reads_and_writes) { + Printf("ThreadSanitizer: thread T%d finished with ignores enabled.\n", + thr->tid); + } } void ThreadFinalize(ThreadState *thr) { CHECK_GT(thr->in_rtl, 0); + ThreadCheckIgnore(thr); +#ifndef TSAN_GO if (!flags()->report_thread_leaks) return; - Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = ctx->threads[i]; - if (tctx == 0) - continue; - MaybeReportThreadLeak(tctx); + ThreadRegistryLock l(CTX()->thread_registry); + Vector<ThreadLeak> leaks(MBlockScopedBuf); + CTX()->thread_registry->RunCallbackForEachThreadLocked( + MaybeReportThreadLeak, &leaks); + for (uptr i = 0; i < leaks.Size(); i++) { + ScopedReport rep(ReportTypeThreadLeak); + rep.AddThread(leaks[i].tctx); + rep.SetCount(leaks[i].count); + OutputReport(CTX(), rep); } +#endif } int ThreadCount(ThreadState *thr) { CHECK_GT(thr->in_rtl, 0); Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - int cnt = 0; - for (unsigned i = 0; i < kMaxTid; i++) { - ThreadContext *tctx = ctx->threads[i]; - if (tctx == 0) - continue; - if (tctx->status != ThreadStatusCreated - && tctx->status != ThreadStatusRunning) - continue; - cnt++; - } - return cnt; -} - -static void ThreadDead(ThreadState *thr, ThreadContext *tctx) { - Context *ctx = CTX(); - CHECK_GT(thr->in_rtl, 0); - CHECK(tctx->status == ThreadStatusRunning - || tctx->status == ThreadStatusFinished); - DPrintf("#%d: ThreadDead uid=%zu\n", thr->tid, tctx->user_id); - tctx->status = ThreadStatusDead; - tctx->user_id = 0; - tctx->sync.Reset(); - - // Put to dead list. - tctx->dead_next = 0; - if (ctx->dead_list_size == 0) - ctx->dead_list_head = tctx; - else - ctx->dead_list_tail->dead_next = tctx; - ctx->dead_list_tail = tctx; - ctx->dead_list_size++; + uptr result; + ctx->thread_registry->GetNumberOfThreads(0, 0, &result); + return (int)result; } int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) { CHECK_GT(thr->in_rtl, 0); - Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); StatInc(thr, StatThreadCreate); - int tid = -1; - ThreadContext *tctx = 0; - if (ctx->dead_list_size > kThreadQuarantineSize - || ctx->thread_seq >= kMaxTid) { - // Reusing old thread descriptor and tid. - if (ctx->dead_list_size == 0) { - Printf("ThreadSanitizer: %d thread limit exceeded. 
Dying.\n", - kMaxTid); - Die(); - } - StatInc(thr, StatThreadReuse); - tctx = ctx->dead_list_head; - ctx->dead_list_head = tctx->dead_next; - ctx->dead_list_size--; - if (ctx->dead_list_size == 0) { - CHECK_EQ(tctx->dead_next, 0); - ctx->dead_list_head = 0; - } - CHECK_EQ(tctx->status, ThreadStatusDead); - tctx->status = ThreadStatusInvalid; - tctx->reuse_count++; - tctx->sync.Reset(); - tid = tctx->tid; - DestroyAndFree(tctx->dead_info); - if (tctx->name) { - internal_free(tctx->name); - tctx->name = 0; - } - } else { - // Allocating new thread descriptor and tid. - StatInc(thr, StatThreadMaxTid); - tid = ctx->thread_seq++; - void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); - tctx = new(mem) ThreadContext(tid); - ctx->threads[tid] = tctx; - MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event)); - } - CHECK_NE(tctx, 0); - CHECK_GE(tid, 0); - CHECK_LT(tid, kMaxTid); + Context *ctx = CTX(); + OnCreatedArgs args = { thr, pc }; + int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args); DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid); - CHECK_EQ(tctx->status, ThreadStatusInvalid); - ctx->alive_threads++; - if (ctx->max_alive_threads < ctx->alive_threads) { - ctx->max_alive_threads++; - CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads); - StatInc(thr, StatThreadMaxAlive); - } - tctx->status = ThreadStatusCreated; - tctx->thr = 0; - tctx->user_id = uid; - tctx->unique_id = ctx->unique_thread_seq++; - tctx->detached = detached; - if (tid) { - thr->fast_state.IncrementEpoch(); - // Can't increment epoch w/o writing to the trace as well. - TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); - thr->clock.set(thr->tid, thr->fast_state.epoch()); - thr->fast_synch_epoch = thr->fast_state.epoch(); - thr->clock.release(&tctx->sync); - StatInc(thr, StatSyncRelease); - tctx->creation_stack.ObtainCurrent(thr, pc); - tctx->creation_tid = thr->tid; - } + StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads()); return tid; } @@ -170,9 +219,8 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) { GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size); if (tid) { - if (stk_addr && stk_size) { - MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size); - } + if (stk_addr && stk_size) + MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size); if (tls_addr && tls_size) { // Check that the thr object is in tls; @@ -183,113 +231,42 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) { CHECK_GE(thr_end, tls_addr); CHECK_LE(thr_end, tls_addr + tls_size); // Since the thr object is huge, skip it. - MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr); - MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end); + MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr); + MemoryRangeImitateWrite(thr, /*pc=*/ 2, + thr_end, tls_addr + tls_size - thr_end); } } - Lock l(&CTX()->thread_mtx); - ThreadContext *tctx = CTX()->threads[tid]; - CHECK_NE(tctx, 0); - CHECK_EQ(tctx->status, ThreadStatusCreated); - tctx->status = ThreadStatusRunning; - tctx->os_id = os_id; - // RoundUp so that one trace part does not contain events - // from different threads. - tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize); - tctx->epoch1 = (u64)-1; - new(thr) ThreadState(CTX(), tid, tctx->unique_id, - tctx->epoch0, stk_addr, stk_size, - tls_addr, tls_size); -#ifdef TSAN_GO - // Setup dynamic shadow stack. 
- const int kInitStackSize = 8; - thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack, - kInitStackSize * sizeof(uptr)); - thr->shadow_stack_pos = thr->shadow_stack; - thr->shadow_stack_end = thr->shadow_stack + kInitStackSize; -#endif - tctx->thr = thr; - thr->fast_synch_epoch = tctx->epoch0; - thr->clock.set(tid, tctx->epoch0); - thr->clock.acquire(&tctx->sync); - thr->fast_state.SetHistorySize(flags()->history_size); - const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts(); - thr->trace.headers[trace].epoch0 = tctx->epoch0; - StatInc(thr, StatSyncAcquire); - DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " - "tls_addr=%zx tls_size=%zx\n", - tid, (uptr)tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size); - thr->is_alive = true; + OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size }; + CTX()->thread_registry->StartThread(tid, os_id, &args); } void ThreadFinish(ThreadState *thr) { CHECK_GT(thr->in_rtl, 0); + ThreadCheckIgnore(thr); StatInc(thr, StatThreadFinish); - // FIXME: Treat it as write. if (thr->stk_addr && thr->stk_size) - MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size); - if (thr->tls_addr && thr->tls_size) { - const uptr thr_beg = (uptr)thr; - const uptr thr_end = (uptr)thr + sizeof(*thr); - // Since the thr object is huge, skip it. - MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr); - MemoryResetRange(thr, /*pc=*/ 5, - thr_end, thr->tls_addr + thr->tls_size - thr_end); - } + DontNeedShadowFor(thr->stk_addr, thr->stk_size); + if (thr->tls_addr && thr->tls_size) + DontNeedShadowFor(thr->tls_addr, thr->tls_size); thr->is_alive = false; Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - ThreadContext *tctx = ctx->threads[thr->tid]; - CHECK_NE(tctx, 0); - CHECK_EQ(tctx->status, ThreadStatusRunning); - CHECK_GT(ctx->alive_threads, 0); - ctx->alive_threads--; - if (tctx->detached) { - ThreadDead(thr, tctx); - } else { - thr->fast_state.IncrementEpoch(); - // Can't increment epoch w/o writing to the trace as well. - TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); - thr->clock.set(thr->tid, thr->fast_state.epoch()); - thr->fast_synch_epoch = thr->fast_state.epoch(); - thr->clock.release(&tctx->sync); - StatInc(thr, StatSyncRelease); - tctx->status = ThreadStatusFinished; - } + ctx->thread_registry->FinishThread(thr->tid); +} - // Save from info about the thread. 
- tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo))) - ThreadDeadInfo(); - for (uptr i = 0; i < TraceParts(); i++) { - tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0; - tctx->dead_info->trace.headers[i].stack0.CopyFrom( - thr->trace.headers[i].stack0); +static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) { + uptr uid = (uptr)arg; + if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) { + tctx->user_id = 0; + return true; } - tctx->epoch1 = thr->fast_state.epoch(); - -#ifndef TSAN_GO - AlloctorThreadFinish(thr); -#endif - thr->~ThreadState(); - StatAggregate(ctx->stat, thr->stat); - tctx->thr = 0; + return false; } int ThreadTid(ThreadState *thr, uptr pc, uptr uid) { CHECK_GT(thr->in_rtl, 0); Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - int res = -1; - for (unsigned tid = 0; tid < kMaxTid; tid++) { - ThreadContext *tctx = ctx->threads[tid]; - if (tctx != 0 && tctx->user_id == uid - && tctx->status != ThreadStatusInvalid) { - tctx->user_id = 0; - res = tid; - break; - } - } + int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid); DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res); return res; } @@ -300,18 +277,7 @@ void ThreadJoin(ThreadState *thr, uptr pc, int tid) { CHECK_LT(tid, kMaxTid); DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid); Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - ThreadContext *tctx = ctx->threads[tid]; - if (tctx->status == ThreadStatusInvalid) { - Printf("ThreadSanitizer: join of non-existent thread\n"); - return; - } - // FIXME(dvyukov): print message and continue (it's user error). - CHECK_EQ(tctx->detached, false); - CHECK_EQ(tctx->status, ThreadStatusFinished); - thr->clock.acquire(&tctx->sync); - StatInc(thr, StatSyncAcquire); - ThreadDead(thr, tctx); + ctx->thread_registry->JoinThread(tid, thr); } void ThreadDetach(ThreadState *thr, uptr pc, int tid) { @@ -319,31 +285,12 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) { CHECK_GT(tid, 0); CHECK_LT(tid, kMaxTid); Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - ThreadContext *tctx = ctx->threads[tid]; - if (tctx->status == ThreadStatusInvalid) { - Printf("ThreadSanitizer: detach of non-existent thread\n"); - return; - } - if (tctx->status == ThreadStatusFinished) { - ThreadDead(thr, tctx); - } else { - tctx->detached = true; - } + ctx->thread_registry->DetachThread(tid); } void ThreadSetName(ThreadState *thr, const char *name) { - Context *ctx = CTX(); - Lock l(&ctx->thread_mtx); - ThreadContext *tctx = ctx->threads[thr->tid]; - CHECK_NE(tctx, 0); - CHECK_EQ(tctx->status, ThreadStatusRunning); - if (tctx->name) { - internal_free(tctx->name); - tctx->name = 0; - } - if (name) - tctx->name = internal_strdup(name); + CHECK_GT(thr->in_rtl, 0); + CTX()->thread_registry->SetThreadName(thr->tid, name); } void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, @@ -378,6 +325,13 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, StatInc(thr, StatMopRange); + if (*shadow_mem == kShadowRodata) { + // Access to .rodata section, no races here. + // Measurements show that it can be 10-20% of all memory accesses. 
+ StatInc(thr, StatMopRangeRodata); + return; + } + FastState fast_state = thr->fast_state; if (fast_state.GetIgnoreBit()) return; @@ -394,7 +348,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); - MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem, cur); } if (unaligned) @@ -405,7 +359,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(0, kAccessSizeLog); - MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem, cur); shadow_mem += kShadowCnt; } @@ -415,24 +369,30 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, Shadow cur(fast_state); cur.SetWrite(is_write); cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog); - MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, + MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false, shadow_mem, cur); } } -void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr) { - MemoryAccess(thr, pc, addr, 0, 0); -} - -void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr) { - MemoryAccess(thr, pc, addr, 0, 1); -} - -void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr) { - MemoryAccess(thr, pc, addr, 3, 0); -} +void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr, + uptr size, uptr step, bool is_write) { + if (size == 0) + return; + FastState fast_state = thr->fast_state; + if (fast_state.GetIgnoreBit()) + return; + StatInc(thr, StatMopRange); + fast_state.IncrementEpoch(); + thr->fast_state = fast_state; + TraceAddEvent(thr, fast_state, EventTypeMop, pc); -void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr) { - MemoryAccess(thr, pc, addr, 3, 1); + for (uptr addr_end = addr + size; addr < addr_end; addr += step) { + u64 *shadow_mem = (u64*)MemToShadow(addr); + Shadow cur(fast_state); + cur.SetWrite(is_write); + cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kSizeLog1); + MemoryAccessImpl(thr, addr, kSizeLog1, is_write, false, + shadow_mem, cur); + } } } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc index 82f1d6b5620f..9676e0872e08 100644 --- a/lib/tsan/rtl/tsan_stat.cc +++ b/lib/tsan/rtl/tsan_stat.cc @@ -38,6 +38,8 @@ void StatOutput(u64 *stat) { name[StatMop8] = " size 8 "; name[StatMopSame] = " Including same "; name[StatMopRange] = " Including range "; + name[StatMopRodata] = " Including .rodata "; + name[StatMopRangeRodata] = " Including .rodata range "; name[StatShadowProcessed] = "Shadow processed "; name[StatShadowZero] = " Including empty "; name[StatShadowNonZero] = " Including non empty "; @@ -105,6 +107,7 @@ void StatOutput(u64 *stat) { name[StatInt_realloc] = " realloc "; name[StatInt_free] = " free "; name[StatInt_cfree] = " cfree "; + name[StatInt_malloc_usable_size] = " malloc_usable_size "; name[StatInt_mmap] = " mmap "; name[StatInt_mmap64] = " mmap64 "; name[StatInt_munmap] = " munmap "; @@ -135,6 +138,8 @@ void StatOutput(u64 *stat) { name[StatInt_strcpy] = " strcpy "; name[StatInt_strncpy] = " strncpy "; name[StatInt_strstr] = " strstr "; + name[StatInt_strcasecmp] = " strcasecmp "; + name[StatInt_strncasecmp] = " strncasecmp "; name[StatInt_atexit] = " atexit "; name[StatInt___cxa_guard_acquire] = " __cxa_guard_acquire "; name[StatInt___cxa_guard_release] = " 
__cxa_guard_release "; @@ -174,6 +179,7 @@ void StatOutput(u64 *stat) { name[StatInt_pthread_barrier_destroy] = " pthread_barrier_destroy "; name[StatInt_pthread_barrier_wait] = " pthread_barrier_wait "; name[StatInt_pthread_once] = " pthread_once "; + name[StatInt_pthread_getschedparam] = " pthread_getschedparam "; name[StatInt_sem_init] = " sem_init "; name[StatInt_sem_destroy] = " sem_destroy "; name[StatInt_sem_wait] = " sem_wait "; @@ -181,6 +187,18 @@ void StatOutput(u64 *stat) { name[StatInt_sem_timedwait] = " sem_timedwait "; name[StatInt_sem_post] = " sem_post "; name[StatInt_sem_getvalue] = " sem_getvalue "; + name[StatInt_stat] = " stat "; + name[StatInt___xstat] = " __xstat "; + name[StatInt_stat64] = " stat64 "; + name[StatInt___xstat64] = " __xstat64 "; + name[StatInt_lstat] = " lstat "; + name[StatInt___lxstat] = " __lxstat "; + name[StatInt_lstat64] = " lstat64 "; + name[StatInt___lxstat64] = " __lxstat64 "; + name[StatInt_fstat] = " fstat "; + name[StatInt___fxstat] = " __fxstat "; + name[StatInt_fstat64] = " fstat64 "; + name[StatInt___fxstat64] = " __fxstat64 "; name[StatInt_open] = " open "; name[StatInt_open64] = " open64 "; name[StatInt_creat] = " creat "; @@ -195,12 +213,15 @@ void StatOutput(u64 *stat) { name[StatInt_socket] = " socket "; name[StatInt_socketpair] = " socketpair "; name[StatInt_connect] = " connect "; + name[StatInt_bind] = " bind "; + name[StatInt_listen] = " listen "; name[StatInt_accept] = " accept "; name[StatInt_accept4] = " accept4 "; name[StatInt_epoll_create] = " epoll_create "; name[StatInt_epoll_create1] = " epoll_create1 "; name[StatInt_close] = " close "; name[StatInt___close] = " __close "; + name[StatInt___res_iclose] = " __res_iclose "; name[StatInt_pipe] = " pipe "; name[StatInt_pipe2] = " pipe2 "; name[StatInt_read] = " read "; @@ -224,6 +245,8 @@ void StatOutput(u64 *stat) { name[StatInt_fclose] = " fclose "; name[StatInt_fread] = " fread "; name[StatInt_fwrite] = " fwrite "; + name[StatInt_fflush] = " fflush "; + name[StatInt_abort] = " abort "; name[StatInt_puts] = " puts "; name[StatInt_rmdir] = " rmdir "; name[StatInt_opendir] = " opendir "; @@ -231,6 +254,10 @@ void StatOutput(u64 *stat) { name[StatInt_epoll_wait] = " epoll_wait "; name[StatInt_poll] = " poll "; name[StatInt_sigaction] = " sigaction "; + name[StatInt_signal] = " signal "; + name[StatInt_raise] = " raise "; + name[StatInt_kill] = " kill "; + name[StatInt_pthread_kill] = " pthread_kill "; name[StatInt_sleep] = " sleep "; name[StatInt_usleep] = " usleep "; name[StatInt_nanosleep] = " nanosleep "; @@ -242,6 +269,59 @@ void StatOutput(u64 *stat) { name[StatInt_scanf] = " scanf "; name[StatInt_sscanf] = " sscanf "; name[StatInt_fscanf] = " fscanf "; + name[StatInt___isoc99_vscanf] = " vscanf "; + name[StatInt___isoc99_vsscanf] = " vsscanf "; + name[StatInt___isoc99_vfscanf] = " vfscanf "; + name[StatInt___isoc99_scanf] = " scanf "; + name[StatInt___isoc99_sscanf] = " sscanf "; + name[StatInt___isoc99_fscanf] = " fscanf "; + name[StatInt_on_exit] = " on_exit "; + name[StatInt___cxa_atexit] = " __cxa_atexit "; + name[StatInt_localtime] = " localtime "; + name[StatInt_localtime_r] = " localtime_r "; + name[StatInt_gmtime] = " gmtime "; + name[StatInt_gmtime_r] = " gmtime_r "; + name[StatInt_ctime] = " ctime "; + name[StatInt_ctime_r] = " ctime_r "; + name[StatInt_asctime] = " asctime "; + name[StatInt_asctime_r] = " asctime_r "; + name[StatInt_frexp] = " frexp "; + name[StatInt_frexpf] = " frexpf "; + name[StatInt_frexpl] = " frexpl "; + name[StatInt_getpwnam] = " 
getpwnam "; + name[StatInt_getpwuid] = " getpwuid "; + name[StatInt_getgrnam] = " getgrnam "; + name[StatInt_getgrgid] = " getgrgid "; + name[StatInt_getpwnam_r] = " getpwnam_r "; + name[StatInt_getpwuid_r] = " getpwuid_r "; + name[StatInt_getgrnam_r] = " getgrnam_r "; + name[StatInt_getgrgid_r] = " getgrgid_r "; + name[StatInt_clock_getres] = " clock_getres "; + name[StatInt_clock_gettime] = " clock_gettime "; + name[StatInt_clock_settime] = " clock_settime "; + name[StatInt_getitimer] = " getitimer "; + name[StatInt_setitimer] = " setitimer "; + name[StatInt_time] = " time "; + name[StatInt_glob] = " glob "; + name[StatInt_glob64] = " glob64 "; + name[StatInt_wait] = " wait "; + name[StatInt_waitid] = " waitid "; + name[StatInt_waitpid] = " waitpid "; + name[StatInt_wait3] = " wait3 "; + name[StatInt_wait4] = " wait4 "; + name[StatInt_inet_ntop] = " inet_ntop "; + name[StatInt_inet_pton] = " inet_pton "; + name[StatInt_getaddrinfo] = " getaddrinfo "; + name[StatInt_getsockname] = " getsockname "; + name[StatInt_gethostent] = " gethostent "; + name[StatInt_gethostbyname] = " gethostbyname "; + name[StatInt_gethostbyname2] = " gethostbyname2 "; + name[StatInt_gethostbyaddr] = " gethostbyaddr "; + name[StatInt_gethostent_r] = " gethostent_r "; + name[StatInt_gethostbyname_r] = " gethostbyname_r "; + name[StatInt_gethostbyname2_r] = " gethostbyname2_r "; + name[StatInt_gethostbyaddr_r] = " gethostbyaddr_r "; + name[StatInt_getsockopt] = " getsockopt "; name[StatAnnotation] = "Dynamic annotations "; name[StatAnnotateHappensBefore] = " HappensBefore "; @@ -251,6 +331,7 @@ void StatOutput(u64 *stat) { name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB "; name[StatAnnotateCondVarWait] = " CondVarWait "; name[StatAnnotateRWLockCreate] = " RWLockCreate "; + name[StatAnnotateRWLockCreateStatic] = " StatAnnotateRWLockCreateStatic "; name[StatAnnotateRWLockDestroy] = " RWLockDestroy "; name[StatAnnotateRWLockAcquired] = " RWLockAcquired "; name[StatAnnotateRWLockReleased] = " RWLockReleased "; @@ -287,6 +368,7 @@ void StatOutput(u64 *stat) { name[StatMtxAnnotations] = " Annotations "; name[StatMtxMBlock] = " MBlock "; name[StatMtxJavaMBlock] = " JavaMBlock "; + name[StatMtxFD] = " FD "; Printf("Statistics:\n"); for (int i = 0; i < StatCnt; i++) diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h index 58c5f23af40b..d5c8b4389394 100644 --- a/lib/tsan/rtl/tsan_stat.h +++ b/lib/tsan/rtl/tsan_stat.h @@ -27,6 +27,8 @@ enum StatType { StatMop8, StatMopSame, StatMopRange, + StatMopRodata, + StatMopRangeRodata, StatShadowProcessed, StatShadowZero, StatShadowNonZero, // Derived. 
@@ -102,6 +104,7 @@ enum StatType { StatInt_realloc, StatInt_free, StatInt_cfree, + StatInt_malloc_usable_size, StatInt_mmap, StatInt_mmap64, StatInt_munmap, @@ -131,6 +134,8 @@ enum StatType { StatInt_strncmp, StatInt_strcpy, StatInt_strncpy, + StatInt_strcasecmp, + StatInt_strncasecmp, StatInt_strstr, StatInt_atexit, StatInt___cxa_guard_acquire, @@ -169,6 +174,7 @@ enum StatType { StatInt_pthread_barrier_destroy, StatInt_pthread_barrier_wait, StatInt_pthread_once, + StatInt_pthread_getschedparam, StatInt_sem_init, StatInt_sem_destroy, StatInt_sem_wait, @@ -176,6 +182,18 @@ enum StatType { StatInt_sem_timedwait, StatInt_sem_post, StatInt_sem_getvalue, + StatInt_stat, + StatInt___xstat, + StatInt_stat64, + StatInt___xstat64, + StatInt_lstat, + StatInt___lxstat, + StatInt_lstat64, + StatInt___lxstat64, + StatInt_fstat, + StatInt___fxstat, + StatInt_fstat64, + StatInt___fxstat64, StatInt_open, StatInt_open64, StatInt_creat, @@ -190,12 +208,15 @@ enum StatType { StatInt_socket, StatInt_socketpair, StatInt_connect, + StatInt_bind, + StatInt_listen, StatInt_accept, StatInt_accept4, StatInt_epoll_create, StatInt_epoll_create1, StatInt_close, StatInt___close, + StatInt___res_iclose, StatInt_pipe, StatInt_pipe2, StatInt_read, @@ -219,6 +240,8 @@ enum StatType { StatInt_fclose, StatInt_fread, StatInt_fwrite, + StatInt_fflush, + StatInt_abort, StatInt_puts, StatInt_rmdir, StatInt_opendir, @@ -241,6 +264,59 @@ enum StatType { StatInt_scanf, StatInt_sscanf, StatInt_fscanf, + StatInt___isoc99_vscanf, + StatInt___isoc99_vsscanf, + StatInt___isoc99_vfscanf, + StatInt___isoc99_scanf, + StatInt___isoc99_sscanf, + StatInt___isoc99_fscanf, + StatInt_on_exit, + StatInt___cxa_atexit, + StatInt_localtime, + StatInt_localtime_r, + StatInt_gmtime, + StatInt_gmtime_r, + StatInt_ctime, + StatInt_ctime_r, + StatInt_asctime, + StatInt_asctime_r, + StatInt_frexp, + StatInt_frexpf, + StatInt_frexpl, + StatInt_getpwnam, + StatInt_getpwuid, + StatInt_getgrnam, + StatInt_getgrgid, + StatInt_getpwnam_r, + StatInt_getpwuid_r, + StatInt_getgrnam_r, + StatInt_getgrgid_r, + StatInt_clock_getres, + StatInt_clock_gettime, + StatInt_clock_settime, + StatInt_getitimer, + StatInt_setitimer, + StatInt_time, + StatInt_glob, + StatInt_glob64, + StatInt_wait, + StatInt_waitid, + StatInt_waitpid, + StatInt_wait3, + StatInt_wait4, + StatInt_inet_ntop, + StatInt_inet_pton, + StatInt_getaddrinfo, + StatInt_getsockname, + StatInt_gethostent, + StatInt_gethostbyname, + StatInt_gethostbyname2, + StatInt_gethostbyaddr, + StatInt_gethostent_r, + StatInt_gethostbyname_r, + StatInt_gethostbyname2_r, + StatInt_gethostbyaddr_r, + StatInt_getsockopt, // Dynamic annotations. StatAnnotation, @@ -289,6 +365,7 @@ enum StatType { StatMtxAtExit, StatMtxMBlock, StatMtxJavaMBlock, + StatMtxFD, // This must be the last. StatCnt diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc index 5316f6db6a0a..6c49355bed88 100644 --- a/lib/tsan/rtl/tsan_suppressions.cc +++ b/lib/tsan/rtl/tsan_suppressions.cc @@ -19,6 +19,13 @@ #include "tsan_mman.h" #include "tsan_platform.h" +// Can be overriden in frontend. 
+#ifndef TSAN_GO +extern "C" const char *WEAK __tsan_default_suppressions() { + return 0; +} +#endif + namespace __tsan { static Suppression *g_suppressions; @@ -31,12 +38,13 @@ static char *ReadFile(const char *filename) { internal_snprintf(tmp.data(), tmp.size(), "%s", filename); else internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename); - fd_t fd = internal_open(tmp.data(), false); - if (fd == kInvalidFd) { + uptr openrv = OpenFile(tmp.data(), false); + if (internal_iserror(openrv)) { Printf("ThreadSanitizer: failed to open suppressions file '%s'\n", tmp.data()); Die(); } + fd_t fd = openrv; const uptr fsize = internal_filesize(fd); if (fsize == (uptr)-1) { Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n", @@ -80,8 +88,7 @@ bool SuppressionMatch(char *templ, const char *str) { return true; } -Suppression *SuppressionParse(const char* supp) { - Suppression *head = 0; +Suppression *SuppressionParse(Suppression *head, const char* supp) { const char *line = supp; while (line) { while (line[0] == ' ' || line[0] == '\t') @@ -121,6 +128,7 @@ Suppression *SuppressionParse(const char* supp) { s->templ = (char*)internal_alloc(MBlockSuppression, end2 - line + 1); internal_memcpy(s->templ, line, end2 - line); s->templ[end2 - line] = 0; + s->hit_count = 0; } if (end[0] == 0) break; @@ -130,11 +138,15 @@ Suppression *SuppressionParse(const char* supp) { } void InitializeSuppressions() { - char *supp = ReadFile(flags()->suppressions); - g_suppressions = SuppressionParse(supp); + const char *supp = ReadFile(flags()->suppressions); + g_suppressions = SuppressionParse(0, supp); +#ifndef TSAN_GO + supp = __tsan_default_suppressions(); + g_suppressions = SuppressionParse(g_suppressions, supp); +#endif } -uptr IsSuppressed(ReportType typ, const ReportStack *stack) { +uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) { if (g_suppressions == 0 || stack == 0) return 0; SuppressionType stype; @@ -152,12 +164,41 @@ uptr IsSuppressed(ReportType typ, const ReportStack *stack) { for (Suppression *supp = g_suppressions; supp; supp = supp->next) { if (stype == supp->type && (SuppressionMatch(supp->templ, frame->func) || - SuppressionMatch(supp->templ, frame->file))) { + SuppressionMatch(supp->templ, frame->file) || + SuppressionMatch(supp->templ, frame->module))) { DPrintf("ThreadSanitizer: matched suppression '%s'\n", supp->templ); + supp->hit_count++; + *sp = supp; return frame->pc; } } } return 0; } + +static const char *SuppTypeStr(SuppressionType t) { + switch (t) { + case SuppressionRace: return "race"; + case SuppressionMutex: return "mutex"; + case SuppressionThread: return "thread"; + case SuppressionSignal: return "signal"; + } + CHECK(0); + return "unknown"; +} + +void PrintMatchedSuppressions() { + int hit_count = 0; + for (Suppression *supp = g_suppressions; supp; supp = supp->next) + hit_count += supp->hit_count; + if (hit_count == 0) + return; + Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", + hit_count, (int)internal_getpid()); + for (Suppression *supp = g_suppressions; supp; supp = supp->next) { + if (supp->hit_count == 0) + continue; + Printf("%d %s:%s\n", supp->hit_count, SuppTypeStr(supp->type), supp->templ); + } +} } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_suppressions.h b/lib/tsan/rtl/tsan_suppressions.h index 61a4cca9d17a..1c98363383dc 100644 --- a/lib/tsan/rtl/tsan_suppressions.h +++ b/lib/tsan/rtl/tsan_suppressions.h @@ -17,10 +17,6 @@ namespace __tsan { -void InitializeSuppressions(); -void 
FinalizeSuppressions(); -uptr IsSuppressed(ReportType typ, const ReportStack *stack); - // Exposed for testing. enum SuppressionType { SuppressionRace, @@ -33,9 +29,14 @@ struct Suppression { Suppression *next; SuppressionType type; char *templ; + int hit_count; }; -Suppression *SuppressionParse(const char* supp); +void InitializeSuppressions(); +void FinalizeSuppressions(); +void PrintMatchedSuppressions(); +uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp); +Suppression *SuppressionParse(Suppression *head, const char* supp); bool SuppressionMatch(char *templ, const char *str); } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc index 29dfe237ffd9..12226064f5a4 100644 --- a/lib/tsan/rtl/tsan_symbolize.cc +++ b/lib/tsan/rtl/tsan_symbolize.cc @@ -18,9 +18,24 @@ #include "sanitizer_common/sanitizer_symbolizer.h" #include "tsan_flags.h" #include "tsan_report.h" +#include "tsan_rtl.h" namespace __tsan { +struct ScopedInSymbolizer { + ScopedInSymbolizer() { + ThreadState *thr = cur_thread(); + CHECK(!thr->in_symbolizer); + thr->in_symbolizer = true; + } + + ~ScopedInSymbolizer() { + ThreadState *thr = cur_thread(); + CHECK(thr->in_symbolizer); + thr->in_symbolizer = false; + } +}; + ReportStack *NewReportStackEntry(uptr addr) { ReportStack *ent = (ReportStack*)internal_alloc(MBlockReportStack, sizeof(ReportStack)); @@ -55,35 +70,36 @@ static ReportStack *NewReportStackEntry(const AddressInfo &info) { } ReportStack *SymbolizeCode(uptr addr) { - if (flags()->external_symbolizer_path[0]) { - static const uptr kMaxAddrFrames = 16; - InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); - for (uptr i = 0; i < kMaxAddrFrames; i++) - new(&addr_frames[i]) AddressInfo(); - uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(), - kMaxAddrFrames); - if (addr_frames_num == 0) - return NewReportStackEntry(addr); - ReportStack *top = 0; - ReportStack *bottom = 0; - for (uptr i = 0; i < addr_frames_num; i++) { - ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]); - CHECK(cur_entry); - addr_frames[i].Clear(); - if (i == 0) - top = cur_entry; - else - bottom->next = cur_entry; - bottom = cur_entry; - } - return top; + if (!IsSymbolizerAvailable()) + return SymbolizeCodeAddr2Line(addr); + ScopedInSymbolizer in_symbolizer; + static const uptr kMaxAddrFrames = 16; + InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); + for (uptr i = 0; i < kMaxAddrFrames; i++) + new(&addr_frames[i]) AddressInfo(); + uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(), + kMaxAddrFrames); + if (addr_frames_num == 0) + return NewReportStackEntry(addr); + ReportStack *top = 0; + ReportStack *bottom = 0; + for (uptr i = 0; i < addr_frames_num; i++) { + ReportStack *cur_entry = NewReportStackEntry(addr_frames[i]); + CHECK(cur_entry); + addr_frames[i].Clear(); + if (i == 0) + top = cur_entry; + else + bottom->next = cur_entry; + bottom = cur_entry; } - return SymbolizeCodeAddr2Line(addr); + return top; } ReportLocation *SymbolizeData(uptr addr) { - if (flags()->external_symbolizer_path[0] == 0) + if (!IsSymbolizerAvailable()) return 0; + ScopedInSymbolizer in_symbolizer; DataInfo info; if (!__sanitizer::SymbolizeData(addr, &info)) return 0; @@ -100,4 +116,11 @@ ReportLocation *SymbolizeData(uptr addr) { return ent; } +void SymbolizeFlush() { + if (!IsSymbolizerAvailable()) + return; + ScopedInSymbolizer in_symbolizer; + __sanitizer::FlushSymbolizer(); +} + } // namespace __tsan diff --git 
a/lib/tsan/rtl/tsan_symbolize.h b/lib/tsan/rtl/tsan_symbolize.h index 29193043cd70..7bc6123df57d 100644 --- a/lib/tsan/rtl/tsan_symbolize.h +++ b/lib/tsan/rtl/tsan_symbolize.h @@ -20,6 +20,7 @@ namespace __tsan { ReportStack *SymbolizeCode(uptr addr); ReportLocation *SymbolizeData(uptr addr); +void SymbolizeFlush(); ReportStack *SymbolizeCodeAddr2Line(uptr addr); diff --git a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc index 76926e2b5aaf..47f9e1fbf418 100644 --- a/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc +++ b/lib/tsan/rtl/tsan_symbolize_addr2line_linux.cc @@ -87,7 +87,8 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { DlIteratePhdrCtx *ctx = (DlIteratePhdrCtx*)arg; InternalScopedBuffer<char> tmp(128); if (ctx->is_first) { - internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe", GetPid()); + internal_snprintf(tmp.data(), tmp.size(), "/proc/%d/exe", + (int)internal_getpid()); info->dlpi_name = tmp.data(); } ctx->is_first = false; diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc index b25346ef344f..c6ddcdb37426 100644 --- a/lib/tsan/rtl/tsan_sync.cc +++ b/lib/tsan/rtl/tsan_sync.cc @@ -63,7 +63,7 @@ SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) { const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); SyncVar *res = new(mem) SyncVar(addr, uid); #ifndef TSAN_GO - res->creation_stack.ObtainCurrent(thr, pc); + res->creation_stack_id = CurrentStackId(thr, pc); #endif return res; } @@ -82,9 +82,10 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, // the hashmap anyway. if (PrimaryAllocator::PointerIsMine((void*)addr)) { MBlock *b = user_mblock(thr, (void*)addr); - Lock l(&b->mtx); + CHECK_NE(b, 0); + MBlock::ScopedLock l(b); SyncVar *res = 0; - for (res = b->head; res; res = res->next) { + for (res = b->ListHead(); res; res = res->next) { if (res->addr == addr) break; } @@ -92,8 +93,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc, if (!create) return 0; res = Create(thr, pc, addr); - res->next = b->head; - b->head = res; + b->ListPush(res); } if (write_lock) res->mtx.Lock(); @@ -147,27 +147,37 @@ SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) { } if (PrimaryAllocator::PointerIsMine((void*)addr)) { MBlock *b = user_mblock(thr, (void*)addr); + CHECK_NE(b, 0); SyncVar *res = 0; { - Lock l(&b->mtx); - SyncVar **prev = &b->head; - res = *prev; - while (res) { + MBlock::ScopedLock l(b); + res = b->ListHead(); + if (res) { if (res->addr == addr) { if (res->is_linker_init) return 0; - *prev = res->next; - break; + b->ListPop(); + } else { + SyncVar **prev = &res->next; + res = *prev; + while (res) { + if (res->addr == addr) { + if (res->is_linker_init) + return 0; + *prev = res->next; + break; + } + prev = &res->next; + res = *prev; + } + } + if (res) { + StatInc(thr, StatSyncDestroyed); + res->mtx.Lock(); + res->mtx.Unlock(); } - prev = &res->next; - res = *prev; } } - if (res) { - StatInc(thr, StatSyncDestroyed); - res->mtx.Lock(); - res->mtx.Unlock(); - } return res; } #endif @@ -197,26 +207,6 @@ SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) { return res; } -uptr SyncVar::GetMemoryConsumption() { - return sizeof(*this) - + clock.size() * sizeof(u64) - + read_clock.size() * sizeof(u64) - + creation_stack.Size() * sizeof(uptr); -} - -uptr SyncTab::GetMemoryConsumption(uptr *nsync) { - uptr mem = 0; - for (int i = 0; i < kPartCount; i++) { - Part *p = &tab_[i]; - Lock l(&p->mtx); - for (SyncVar *s = 
p->val; s; s = s->next) { - *nsync += 1; - mem += s->GetMemoryConsumption(); - } - } - return mem; -} - int SyncTab::PartIdx(uptr addr) { return (addr >> 3) % kPartCount; } diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h index 77749e22ffc2..823af543f590 100644 --- a/lib/tsan/rtl/tsan_sync.h +++ b/lib/tsan/rtl/tsan_sync.h @@ -59,7 +59,7 @@ struct SyncVar { const u64 uid; // Globally unique id. SyncClock clock; SyncClock read_clock; // Used for rw mutexes only. - StackTrace creation_stack; + u32 creation_stack_id; int owner_tid; // Set only by exclusive owners. u64 last_lock; int recursion; diff --git a/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/lib/tsan/rtl/tsan_update_shadow_word_inl.h index 2c435556abb2..e7c036c5dea8 100644 --- a/lib/tsan/rtl/tsan_update_shadow_word_inl.h +++ b/lib/tsan/rtl/tsan_update_shadow_word_inl.h @@ -34,7 +34,7 @@ do { if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); if (OldIsInSameSynchEpoch(old, thr)) { - if (OldIsRWNotWeaker(old, kAccessIsWrite)) { + if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) { // found a slot that holds effectively the same info // (that is, same tid, same sync epoch and same size) StatInc(thr, StatMopSame); @@ -43,7 +43,7 @@ do { StoreIfNotYetStored(sp, &store_word); break; } - if (OldIsRWWeakerOrEqual(old, kAccessIsWrite)) + if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) StoreIfNotYetStored(sp, &store_word); break; } @@ -52,25 +52,23 @@ do { StoreIfNotYetStored(sp, &store_word); break; } - if (BothReads(old, kAccessIsWrite)) + if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break; goto RACE; } - // Do the memory access intersect? - if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) { + // In Go all memory accesses are 1 byte, so there can be no intersections. + if (kCppMode && Shadow::TwoRangesIntersect(old, cur, kAccessSize)) { StatInc(thr, StatShadowIntersect); if (Shadow::TidsAreEqual(old, cur)) { StatInc(thr, StatShadowSameThread); break; } StatInc(thr, StatShadowAnotherThread); - if (HappensBefore(old, thr)) + if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) break; - - if (BothReads(old, kAccessIsWrite)) + if (HappensBefore(old, thr)) break; - goto RACE; } // The accesses do not intersect. diff --git a/lib/tsan/rtl/tsan_vector.h b/lib/tsan/rtl/tsan_vector.h index d41063df3de5..fa236b1f1e44 100644 --- a/lib/tsan/rtl/tsan_vector.h +++ b/lib/tsan/rtl/tsan_vector.h @@ -64,6 +64,11 @@ class Vector { return &end_[-1]; } + void PopBack() { + DCHECK_GT(end_, begin_); + end_--; + } + void Resize(uptr size) { uptr old_size = Size(); EnsureSize(size); @@ -105,6 +110,6 @@ class Vector { Vector(const Vector&); void operator=(const Vector&); }; -} +} // namespace __tsan #endif // #ifndef TSAN_VECTOR_H diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt index 0fcc6b2b1c8f..7cc079f3d27a 100644 --- a/lib/tsan/tests/CMakeLists.txt +++ b/lib/tsan/tests/CMakeLists.txt @@ -12,9 +12,9 @@ function(add_tsan_unittest testname) add_unittest(TsanUnitTests ${testname} ${ARGN}) # Link with TSan runtime. target_link_libraries(${testname} clang_rt.tsan-x86_64) - # Build tests with PIE and debug info. - set_property(TARGET ${testname} APPEND_STRING - PROPERTY COMPILE_FLAGS " -fPIE -g") + # Compile tests with the same flags as TSan runtime. + set_target_compile_flags(${testname} ${TSAN_CFLAGS}) + # Link tests with -pie. 
set_property(TARGET ${testname} APPEND_STRING PROPERTY LINK_FLAGS " -pie") endif() diff --git a/lib/tsan/tests/rtl/tsan_test_util_linux.cc b/lib/tsan/tests/rtl/tsan_test_util_linux.cc index dce8db90de70..a2601486a2e1 100644 --- a/lib/tsan/tests/rtl/tsan_test_util_linux.cc +++ b/lib/tsan/tests/rtl/tsan_test_util_linux.cc @@ -73,7 +73,7 @@ bool OnReport(const ReportDesc *rep, bool suppressed) { expect_report_reported = true; return true; } -} +} // namespace __tsan static void* allocate_addr(int size, int offset_from_aligned = 0) { static uintptr_t foo; diff --git a/lib/tsan/tests/unit/CMakeLists.txt b/lib/tsan/tests/unit/CMakeLists.txt index 52ebdb826939..b25a56d8d55c 100644 --- a/lib/tsan/tests/unit/CMakeLists.txt +++ b/lib/tsan/tests/unit/CMakeLists.txt @@ -3,7 +3,6 @@ set(TSAN_UNIT_TESTS tsan_flags_test.cc tsan_mman_test.cc tsan_mutex_test.cc - tsan_platform_test.cc tsan_shadow_test.cc tsan_stack_test.cc tsan_suppressions_test.cc diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc index 1a9a88f606fc..0961d2b75d11 100644 --- a/lib/tsan/tests/unit/tsan_mman_test.cc +++ b/lib/tsan/tests/unit/tsan_mman_test.cc @@ -10,10 +10,21 @@ // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// +#include <limits> #include "tsan_mman.h" #include "tsan_rtl.h" #include "gtest/gtest.h" +extern "C" { +uptr __tsan_get_current_allocated_bytes(); +uptr __tsan_get_heap_size(); +uptr __tsan_get_free_bytes(); +uptr __tsan_get_unmapped_bytes(); +uptr __tsan_get_estimated_allocated_size(uptr size); +bool __tsan_get_ownership(void *p); +uptr __tsan_get_allocated_size(void *p); +} + namespace __tsan { TEST(Mman, Internal) { @@ -44,10 +55,10 @@ TEST(Mman, User) { EXPECT_NE(p2, p); MBlock *b = user_mblock(thr, p); EXPECT_NE(b, (MBlock*)0); - EXPECT_EQ(b->size, (uptr)10); + EXPECT_EQ(b->Size(), (uptr)10); MBlock *b2 = user_mblock(thr, p2); EXPECT_NE(b2, (MBlock*)0); - EXPECT_EQ(b2->size, (uptr)20); + EXPECT_EQ(b2->Size(), (uptr)20); for (int i = 0; i < 10; i++) { p[i] = 42; EXPECT_EQ(b, user_mblock(thr, p + i)); @@ -106,4 +117,55 @@ TEST(Mman, UserRealloc) { } } +TEST(Mman, UsableSize) { + ScopedInRtl in_rtl; + ThreadState *thr = cur_thread(); + uptr pc = 0; + char *p = (char*)user_alloc(thr, pc, 10); + char *p2 = (char*)user_alloc(thr, pc, 20); + EXPECT_EQ(0U, user_alloc_usable_size(thr, pc, NULL)); + EXPECT_EQ(10U, user_alloc_usable_size(thr, pc, p)); + EXPECT_EQ(20U, user_alloc_usable_size(thr, pc, p2)); + user_free(thr, pc, p); + user_free(thr, pc, p2); +} + +TEST(Mman, Stats) { + ScopedInRtl in_rtl; + ThreadState *thr = cur_thread(); + + uptr alloc0 = __tsan_get_current_allocated_bytes(); + uptr heap0 = __tsan_get_heap_size(); + uptr free0 = __tsan_get_free_bytes(); + uptr unmapped0 = __tsan_get_unmapped_bytes(); + + EXPECT_EQ(__tsan_get_estimated_allocated_size(10), (uptr)10); + EXPECT_EQ(__tsan_get_estimated_allocated_size(20), (uptr)20); + EXPECT_EQ(__tsan_get_estimated_allocated_size(100), (uptr)100); + + char *p = (char*)user_alloc(thr, 0, 10); + EXPECT_EQ(__tsan_get_ownership(p), true); + EXPECT_EQ(__tsan_get_allocated_size(p), (uptr)10); + + EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0 + 16); + EXPECT_GE(__tsan_get_heap_size(), heap0); + EXPECT_EQ(__tsan_get_free_bytes(), free0); + EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0); + + user_free(thr, 0, p); + + EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0); + EXPECT_GE(__tsan_get_heap_size(), heap0); + 
EXPECT_EQ(__tsan_get_free_bytes(), free0); + EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0); +} + +TEST(Mman, CallocOverflow) { + size_t kArraySize = 4096; + volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max(); + volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10; + volatile void *p = calloc(kArraySize, kArraySize2); // Should return 0. + EXPECT_EQ(0L, p); +} + } // namespace __tsan diff --git a/lib/tsan/tests/unit/tsan_mutexset_test.cc b/lib/tsan/tests/unit/tsan_mutexset_test.cc index da1ae2e49e0c..335a7748cc1a 100644 --- a/lib/tsan/tests/unit/tsan_mutexset_test.cc +++ b/lib/tsan/tests/unit/tsan_mutexset_test.cc @@ -115,7 +115,8 @@ TEST(MutexSet, Overflow) { EXPECT_EQ(mset.Size(), MutexSet::kMaxSize); for (uptr i = 0; i < MutexSet::kMaxSize; i++) { if (i == 0) - Expect(mset, i, 63, true, 64, 2); + Expect(mset, i, MutexSet::kMaxSize - 1, + true, MutexSet::kMaxSize, 2); else if (i == MutexSet::kMaxSize - 1) Expect(mset, i, 100, true, 200, 1); else diff --git a/lib/tsan/tests/unit/tsan_platform_test.cc b/lib/tsan/tests/unit/tsan_platform_test.cc deleted file mode 100644 index b43dbb4e4ff3..000000000000 --- a/lib/tsan/tests/unit/tsan_platform_test.cc +++ /dev/null @@ -1,89 +0,0 @@ -//===-- tsan_platform_test.cc ---------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of ThreadSanitizer (TSan), a race detector. -// -//===----------------------------------------------------------------------===// -#include "sanitizer_common/sanitizer_libc.h" -#include "tsan_platform.h" -#include "tsan_rtl.h" -#include "gtest/gtest.h" - -namespace __tsan { - -static void TestThreadInfo(bool main) { - ScopedInRtl in_rtl; - uptr stk_addr = 0; - uptr stk_size = 0; - uptr tls_addr = 0; - uptr tls_size = 0; - GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size); - // Printf("stk=%zx-%zx(%zu)\n", stk_addr, stk_addr + stk_size, stk_size); - // Printf("tls=%zx-%zx(%zu)\n", tls_addr, tls_addr + tls_size, tls_size); - - int stack_var; - EXPECT_NE(stk_addr, (uptr)0); - EXPECT_NE(stk_size, (uptr)0); - EXPECT_GT((uptr)&stack_var, stk_addr); - EXPECT_LT((uptr)&stack_var, stk_addr + stk_size); - - static __thread int thread_var; - EXPECT_NE(tls_addr, (uptr)0); - EXPECT_NE(tls_size, (uptr)0); - EXPECT_GT((uptr)&thread_var, tls_addr); - EXPECT_LT((uptr)&thread_var, tls_addr + tls_size); - - // Ensure that tls and stack do not intersect. 
- uptr tls_end = tls_addr + tls_size; - EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size); - EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size); - EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr)); -} - -static void *WorkerThread(void *arg) { - TestThreadInfo(false); - return 0; -} - -TEST(Platform, ThreadInfoMain) { - TestThreadInfo(true); -} - -TEST(Platform, ThreadInfoWorker) { - pthread_t t; - pthread_create(&t, 0, WorkerThread, 0); - pthread_join(t, 0); -} - -TEST(Platform, FileOps) { - const char *str1 = "qwerty"; - uptr len1 = internal_strlen(str1); - const char *str2 = "zxcv"; - uptr len2 = internal_strlen(str2); - - fd_t fd = internal_open("./tsan_test.tmp", true); - EXPECT_NE(fd, kInvalidFd); - EXPECT_EQ(len1, internal_write(fd, str1, len1)); - EXPECT_EQ(len2, internal_write(fd, str2, len2)); - internal_close(fd); - - fd = internal_open("./tsan_test.tmp", false); - EXPECT_NE(fd, kInvalidFd); - EXPECT_EQ(len1 + len2, internal_filesize(fd)); - char buf[64] = {}; - EXPECT_EQ(len1, internal_read(fd, buf, len1)); - EXPECT_EQ(0, internal_memcmp(buf, str1, len1)); - EXPECT_EQ((char)0, buf[len1 + 1]); - internal_memset(buf, 0, len1); - EXPECT_EQ(len2, internal_read(fd, buf, len2)); - EXPECT_EQ(0, internal_memcmp(buf, str2, len2)); - internal_close(fd); -} - -} // namespace __tsan diff --git a/lib/tsan/tests/unit/tsan_shadow_test.cc b/lib/tsan/tests/unit/tsan_shadow_test.cc index fa9c982c0f6d..17b17977bf86 100644 --- a/lib/tsan/tests/unit/tsan_shadow_test.cc +++ b/lib/tsan/tests/unit/tsan_shadow_test.cc @@ -25,7 +25,7 @@ TEST(Shadow, FastState) { EXPECT_EQ(s.GetHistorySize(), 0); EXPECT_EQ(s.addr0(), (u64)0); EXPECT_EQ(s.size(), (u64)1); - EXPECT_EQ(s.is_write(), false); + EXPECT_EQ(s.IsWrite(), true); s.IncrementEpoch(); EXPECT_EQ(s.epoch(), (u64)23); diff --git a/lib/tsan/tests/unit/tsan_suppressions_test.cc b/lib/tsan/tests/unit/tsan_suppressions_test.cc index e1e0c12c004c..decfa3214d23 100644 --- a/lib/tsan/tests/unit/tsan_suppressions_test.cc +++ b/lib/tsan/tests/unit/tsan_suppressions_test.cc @@ -20,7 +20,7 @@ namespace __tsan { TEST(Suppressions, Parse) { ScopedInRtl in_rtl; - Suppression *supp0 = SuppressionParse( + Suppression *supp0 = SuppressionParse(0, "race:foo\n" " race:bar\n" // NOLINT "race:baz \n" // NOLINT @@ -45,7 +45,7 @@ TEST(Suppressions, Parse) { TEST(Suppressions, Parse2) { ScopedInRtl in_rtl; - Suppression *supp0 = SuppressionParse( + Suppression *supp0 = SuppressionParse(0, " # first line comment\n" // NOLINT " race:bar \n" // NOLINT "race:baz* *baz\n" @@ -64,7 +64,7 @@ TEST(Suppressions, Parse2) { TEST(Suppressions, Parse3) { ScopedInRtl in_rtl; - Suppression *supp0 = SuppressionParse( + Suppression *supp0 = SuppressionParse(0, "# last suppression w/o line-feed\n" "race:foo\n" "race:bar" @@ -81,7 +81,7 @@ TEST(Suppressions, Parse3) { TEST(Suppressions, ParseType) { ScopedInRtl in_rtl; - Suppression *supp0 = SuppressionParse( + Suppression *supp0 = SuppressionParse(0, "race:foo\n" "thread:bar\n" "mutex:baz\n" diff --git a/lib/ubsan/CMakeLists.txt b/lib/ubsan/CMakeLists.txt index 40d0e897179d..c8470bc6d353 100644 --- a/lib/ubsan/CMakeLists.txt +++ b/lib/ubsan/CMakeLists.txt @@ -3,9 +3,12 @@ set(UBSAN_SOURCES ubsan_diag.cc ubsan_handlers.cc + ubsan_value.cc + ) + +set(UBSAN_CXX_SOURCES ubsan_handlers_cxx.cc ubsan_type_hash.cc - ubsan_value.cc ) include_directories(..) @@ -19,31 +22,30 @@ set(UBSAN_RUNTIME_LIBRARIES) if(APPLE) # Build universal binary on APPLE. 
- add_library(clang_rt.ubsan_osx STATIC - ${UBSAN_SOURCES} - $<TARGET_OBJECTS:RTSanitizerCommon.osx> - ) - set_target_compile_flags(clang_rt.ubsan_osx ${UBSAN_CFLAGS}) - set_target_properties(clang_rt.ubsan_osx PROPERTIES - OSX_ARCHITECTURES "${UBSAN_SUPPORTED_ARCH}") + add_compiler_rt_osx_static_runtime(clang_rt.ubsan_osx + ARCH ${UBSAN_SUPPORTED_ARCH} + SOURCES ${UBSAN_SOURCES} ${UBSAN_CXX_SOURCES} + $<TARGET_OBJECTS:RTSanitizerCommon.osx> + CFLAGS ${UBSAN_CFLAGS}) list(APPEND UBSAN_RUNTIME_LIBRARIES clang_rt.ubsan_osx) else() # Build separate libraries for each target. foreach(arch ${UBSAN_SUPPORTED_ARCH}) - add_library(clang_rt.ubsan-${arch} STATIC - ${UBSAN_SOURCES} - $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> - ) - set_target_compile_flags(clang_rt.ubsan-${arch} - ${UBSAN_CFLAGS} ${TARGET_${arch}_CFLAGS} - ) - list(APPEND UBSAN_RUNTIME_LIBRARIES clang_rt.ubsan-${arch}) + # Main UBSan runtime. + add_compiler_rt_static_runtime(clang_rt.ubsan-${arch} ${arch} + SOURCES ${UBSAN_SOURCES} + CFLAGS ${UBSAN_CFLAGS} + SYMS ubsan.syms) + # C++-specific parts of UBSan runtime. Requires a C++ ABI library. + add_compiler_rt_static_runtime(clang_rt.ubsan_cxx-${arch} ${arch} + SOURCES ${UBSAN_CXX_SOURCES} + CFLAGS ${UBSAN_CFLAGS} + SYMS ubsan.syms) + list(APPEND UBSAN_RUNTIME_LIBRARIES + clang_rt.san-${arch} + clang_rt.ubsan-${arch} + clang_rt.ubsan_cxx-${arch}) endforeach() endif() - -set_property(TARGET ${UBSAN_RUNTIME_LIBRARIES} APPEND PROPERTY - COMPILE_DEFINITIONS ${UBSAN_COMMON_DEFINITIONS}) -add_clang_compiler_rt_libraries(${UBSAN_RUNTIME_LIBRARIES}) - add_subdirectory(lit_tests) diff --git a/lib/ubsan/Makefile.mk b/lib/ubsan/Makefile.mk index 5702e0e752d8..d5561f41b152 100644 --- a/lib/ubsan/Makefile.mk +++ b/lib/ubsan/Makefile.mk @@ -11,6 +11,8 @@ ModuleName := ubsan SubDirs := Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file))) +CXXSources := ubsan_type_hash.cc ubsan_handlers_cxx.cc +CSources := $(filter-out $(CXXSources),$(Sources)) ObjNames := $(Sources:%.cc=%.o) Implementation := Generic @@ -20,4 +22,5 @@ Dependencies := $(wildcard $(Dir)/*.h) Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h) # Define a convenience variable for all the ubsan functions. -UbsanFunctions := $(Sources:%.cc=%) +UbsanFunctions := $(CSources:%.cc=%) +UbsanCXXFunctions := $(CXXSources:%.cc=%) diff --git a/lib/ubsan/lit_tests/CMakeLists.txt b/lib/ubsan/lit_tests/CMakeLists.txt index 565c523ceb49..7e1a13c782d2 100644 --- a/lib/ubsan/lit_tests/CMakeLists.txt +++ b/lib/ubsan/lit_tests/CMakeLists.txt @@ -7,9 +7,8 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS) # Run UBSan output tests only if we're sure that clang would produce # working binaries. set(UBSAN_TEST_DEPS - clang clang-headers FileCheck count not - ${UBSAN_RUNTIME_LIBRARIES} - ) + ${SANITIZER_COMMON_LIT_TEST_DEPS} + ${UBSAN_RUNTIME_LIBRARIES}) set(UBSAN_TEST_PARAMS ubsan_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg ) diff --git a/lib/ubsan/lit_tests/Float/cast-overflow.cpp b/lib/ubsan/lit_tests/Float/cast-overflow.cpp index 63410dc87140..8d9120d586a1 100644 --- a/lib/ubsan/lit_tests/Float/cast-overflow.cpp +++ b/lib/ubsan/lit_tests/Float/cast-overflow.cpp @@ -9,7 +9,6 @@ // RUN: %t 6 2>&1 | FileCheck %s --check-prefix=CHECK-6 // FIXME: %t 7 2>&1 | FileCheck %s --check-prefix=CHECK-7 // RUN: %t 8 2>&1 | FileCheck %s --check-prefix=CHECK-8 -// RUN: %t 9 2>&1 | FileCheck %s --check-prefix=CHECK-9 // This test assumes float and double are IEEE-754 single- and double-precision. 
@@ -36,6 +35,9 @@ int main(int argc, char **argv) { (void)(float)FloatMaxAsUInt128; // ok #endif + float NearlyMinusOne = -0.99999; + unsigned Zero = NearlyMinusOne; // ok + // Build a '+Inf'. char InfVal[] = { 0x00, 0x00, 0x80, 0x7f }; float Inf; @@ -46,6 +48,8 @@ int main(int argc, char **argv) { float NaN; memcpy(&NaN, NaNVal, 4); + double DblInf = (double)Inf; // ok + switch (argv[1][0]) { // FIXME: Produce a source location for these checks and test for it here. @@ -59,8 +63,8 @@ int main(int argc, char **argv) { // CHECK-1: runtime error: value -2.14748{{.*}} is outside the range of representable values of type 'int' return MinFloatRepresentableAsInt - 0x100; case '2': - // CHECK-2: runtime error: value -0.001 is outside the range of representable values of type 'unsigned int' - return (unsigned)-0.001; + // CHECK-2: runtime error: value -1 is outside the range of representable values of type 'unsigned int' + return (unsigned)-1.0; case '3': // CHECK-3: runtime error: value 4.2949{{.*}} is outside the range of representable values of type 'unsigned int' return (unsigned)(MaxFloatRepresentableAsUInt + 0x100); @@ -91,8 +95,5 @@ int main(int argc, char **argv) { case '8': // CHECK-8: runtime error: value 1e+39 is outside the range of representable values of type 'float' return (float)1e39; - case '9': - // CHECK-9: runtime error: value {{.*}} is outside the range of representable values of type 'double' - return (double)Inf; } } diff --git a/lib/ubsan/lit_tests/Misc/bounds.cpp b/lib/ubsan/lit_tests/Misc/bounds.cpp new file mode 100644 index 000000000000..07b30d384df9 --- /dev/null +++ b/lib/ubsan/lit_tests/Misc/bounds.cpp @@ -0,0 +1,15 @@ +// RUN: %clang -fsanitize=bounds %s -O3 -o %T/bounds.exe +// RUN: %T/bounds.exe 0 0 0 +// RUN: %T/bounds.exe 1 2 3 +// RUN: %T/bounds.exe 2 0 0 2>&1 | FileCheck %s --check-prefix=CHECK-A-2 +// RUN: %T/bounds.exe 0 3 0 2>&1 | FileCheck %s --check-prefix=CHECK-B-3 +// RUN: %T/bounds.exe 0 0 4 2>&1 | FileCheck %s --check-prefix=CHECK-C-4 + +int main(int argc, char **argv) { + int arr[2][3][4] = {}; + + return arr[argv[1][0] - '0'][argv[2][0] - '0'][argv[3][0] - '0']; + // CHECK-A-2: bounds.cpp:11:10: runtime error: index 2 out of bounds for type 'int [2][3][4]' + // CHECK-B-3: bounds.cpp:11:10: runtime error: index 3 out of bounds for type 'int [3][4]' + // CHECK-C-4: bounds.cpp:11:10: runtime error: index 4 out of bounds for type 'int [4]' +} diff --git a/lib/ubsan/lit_tests/TypeCheck/vptr.cpp b/lib/ubsan/lit_tests/TypeCheck/vptr.cpp index 574a7bef9622..109e7a824f58 100644 --- a/lib/ubsan/lit_tests/TypeCheck/vptr.cpp +++ b/lib/ubsan/lit_tests/TypeCheck/vptr.cpp @@ -1,11 +1,13 @@ // RUN: %clang -ccc-cxx -fsanitize=vptr %s -O3 -o %t -// RUN: %t rT && %t mT && %t fT -// RUN: %t rU && %t mU && %t fU +// RUN: %t rT && %t mT && %t fT && %t cT +// RUN: %t rU && %t mU && %t fU && %t cU // RUN: %t rS && %t rV && %t oV // RUN: %t mS 2>&1 | FileCheck %s --check-prefix=CHECK-MEMBER --strict-whitespace // RUN: %t fS 2>&1 | FileCheck %s --check-prefix=CHECK-MEMFUN --strict-whitespace +// RUN: %t cS 2>&1 | FileCheck %s --check-prefix=CHECK-DOWNCAST --strict-whitespace // RUN: %t mV 2>&1 | FileCheck %s --check-prefix=CHECK-MEMBER --strict-whitespace // RUN: %t fV 2>&1 | FileCheck %s --check-prefix=CHECK-MEMFUN --strict-whitespace +// RUN: %t cV 2>&1 | FileCheck %s --check-prefix=CHECK-DOWNCAST --strict-whitespace // RUN: %t oU 2>&1 | FileCheck %s --check-prefix=CHECK-OFFSET --strict-whitespace // RUN: %t m0 2>&1 | FileCheck %s --check-prefix=CHECK-NULL-MEMBER 
--strict-whitespace @@ -102,5 +104,14 @@ int main(int, char **argv) { // CHECK-OFFSET-NEXT: {{^ \^ ( ~~~~~~~~~~~~)~~~~~~~~~~~ *$}} // CHECK-OFFSET-NEXT: {{^ ( )?vptr for}} 'T' base class of [[DYN_TYPE]] return reinterpret_cast<U*>(p)->v() - 2; + + case 'c': + // CHECK-DOWNCAST: vptr.cpp:[[@LINE+5]]:5: runtime error: downcast of address [[PTR:0x[0-9a-f]*]] which does not point to an object of type 'T' + // CHECK-DOWNCAST-NEXT: [[PTR]]: note: object is of type [[DYN_TYPE:'S'|'U']] + // CHECK-DOWNCAST-NEXT: {{^ .. .. .. .. .. .. .. .. .. .. .. .. }} + // CHECK-DOWNCAST-NEXT: {{^ \^~~~~~~~~~~(~~~~~~~~~~~~)? *$}} + // CHECK-DOWNCAST-NEXT: {{^ vptr for}} [[DYN_TYPE]] + static_cast<T*>(reinterpret_cast<S*>(p)); + return 0; } } diff --git a/lib/ubsan/lit_tests/lit.cfg b/lib/ubsan/lit_tests/lit.cfg index 9fd3a1aeaa16..ea6ebdf9001f 100644 --- a/lib/ubsan/lit_tests/lit.cfg +++ b/lib/ubsan/lit_tests/lit.cfg @@ -2,6 +2,14 @@ import os +def get_required_attr(config, attr_name): + attr_value = getattr(config, attr_name, None) + if not attr_value: + lit.fatal("No attribute %r in test configuration! You may need to run " + "tests from your build directory or add this attribute " + "to lit.site.cfg " % attr_name) + return attr_value + # Setup config name. config.name = 'UndefinedBehaviorSanitizer' @@ -30,14 +38,6 @@ if llvm_src_root is None: if not llvm_config: DisplayNoConfigMessage() - # Validate that llvm-config points to the same source tree. - llvm_src_root = lit.util.capture(["llvm-config", "--src-root"]).strip() - ubsan_test_src_root = os.path.join(llvm_src_root, "projects", "compiler-rt", - "lib", "ubsan", "lit_tests") - if (os.path.realpath(ubsan_test_src_root) != - os.path.realpath(config.test_source_root)): - DisplayNoConfigMessage() - # Find out the presumed location of generated site config. llvm_obj_root = lit.util.capture(["llvm-config", "--obj-root"]).strip() ubsan_site_cfg = os.path.join(llvm_obj_root, "projects", "compiler-rt", @@ -49,8 +49,9 @@ if llvm_src_root is None: raise SystemExit # Setup attributes common for all compiler-rt projects. 
-compiler_rt_lit_cfg = os.path.join(llvm_src_root, "projects", "compiler-rt", - "lib", "lit.common.cfg") +compiler_rt_src_root = get_required_attr(config, 'compiler_rt_src_root') +compiler_rt_lit_cfg = os.path.join(compiler_rt_src_root, "lib", + "lit.common.cfg") if not compiler_rt_lit_cfg or not os.path.exists(compiler_rt_lit_cfg): lit.fatal("Can't find common compiler-rt lit config at: %r" % compiler_rt_lit_cfg) diff --git a/lib/ubsan/lit_tests/lit.site.cfg.in b/lib/ubsan/lit_tests/lit.site.cfg.in index b1c6ccf544ea..07b521af061f 100644 --- a/lib/ubsan/lit_tests/lit.site.cfg.in +++ b/lib/ubsan/lit_tests/lit.site.cfg.in @@ -4,6 +4,7 @@ config.clang = "@LLVM_BINARY_DIR@/bin/clang" config.host_os = "@HOST_OS@" config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.compiler_rt_src_root = "@COMPILER_RT_SOURCE_DIR@" config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" config.target_triple = "@TARGET_TRIPLE@" diff --git a/lib/ubsan/ubsan.syms b/lib/ubsan/ubsan.syms new file mode 100644 index 000000000000..e74de33f012c --- /dev/null +++ b/lib/ubsan/ubsan.syms @@ -0,0 +1 @@ +{ __ubsan_*; }; diff --git a/lib/ubsan/ubsan_diag.cc b/lib/ubsan/ubsan_diag.cc index 57c98e669e90..3f92761465de 100644 --- a/lib/ubsan/ubsan_diag.cc +++ b/lib/ubsan/ubsan_diag.cc @@ -14,6 +14,7 @@ #include "ubsan_diag.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include <stdio.h> @@ -30,7 +31,7 @@ Location __ubsan::getCallerLocation(uptr CallerLoc) { if (!SymbolizeCode(Loc, &Info, 1) || !Info.module || !*Info.module) return Location(Loc); - if (!Info.function) + if (!Info.file) return ModuleLocation(Info.module, Info.module_offset); return SourceLocation(Info.file, Info.line, Info.column); @@ -70,7 +71,7 @@ static void renderLocation(Location Loc) { case Location::LK_Source: { SourceLocation SLoc = Loc.getSourceLocation(); if (SLoc.isInvalid()) - RawWrite("<unknown>:"); + Printf("<unknown>:"); else { Printf("%s:%d:", SLoc.getFilename(), SLoc.getLine()); if (SLoc.getColumn()) @@ -86,7 +87,7 @@ static void renderLocation(Location Loc) { Printf("%p:", Loc.getMemoryLocation()); break; case Location::LK_Null: - RawWrite("<unknown>:"); + Printf("<unknown>:"); break; } } @@ -99,7 +100,7 @@ static void renderText(const char *Message, const Diag::Arg *Args) { for (I = 0; Msg[I] && Msg[I] != '%' && I != 63; ++I) Buffer[I] = Msg[I]; Buffer[I] = '\0'; - RawWrite(Buffer); + Printf(Buffer); Msg += I - 1; } else { const Diag::Arg &A = Args[*++Msg - '0']; @@ -108,9 +109,7 @@ static void renderText(const char *Message, const Diag::Arg *Args) { Printf("%s", A.String); break; case Diag::AK_Mangled: { - RawWrite("'"); - RawWrite(Demangle(A.String)); - RawWrite("'"); + Printf("'%s'", Demangle(A.String)); break; } case Diag::AK_SInt: @@ -156,7 +155,8 @@ static Range *upperBound(MemoryLocation Loc, Range *Ranges, } /// Render a snippet of the address space near a location. -static void renderMemorySnippet(MemoryLocation Loc, +static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor, + MemoryLocation Loc, Range *Ranges, unsigned NumRanges, const Diag::Arg *Args) { const unsigned BytesToShow = 32; @@ -180,9 +180,10 @@ static void renderMemorySnippet(MemoryLocation Loc, unsigned char C = *reinterpret_cast<const unsigned char*>(P); Printf("%s%02x", (P % 8 == 0) ? " " : " ", C); } - RawWrite("\n"); + Printf("\n"); // Emit highlights. 
+ Printf(Decor.Green()); Range *InRange = upperBound(Min, Ranges, NumRanges); for (uptr P = Min; P != Max; ++P) { char Pad = ' ', Byte = ' '; @@ -195,9 +196,9 @@ static void renderMemorySnippet(MemoryLocation Loc, if (InRange && InRange->getStart().getMemoryLocation() <= P) Byte = '~'; char Buffer[] = { Pad, Pad, P == Loc ? '^' : Byte, Byte, 0 }; - RawWrite((P % 8 == 0) ? Buffer : &Buffer[1]); + Printf((P % 8 == 0) ? Buffer : &Buffer[1]); } - RawWrite("\n"); + Printf("%s\n", Decor.Default()); // Go over the line again, and print names for the ranges. InRange = 0; @@ -212,9 +213,9 @@ static void renderMemorySnippet(MemoryLocation Loc, if (InRange && InRange->getStart().getMemoryLocation() == P) { while (Spaces--) - RawWrite(" "); + Printf(" "); renderText(InRange->getText(), Args); - RawWrite("\n"); + Printf("\n"); // FIXME: We only support naming one range for now! break; } @@ -235,37 +236,28 @@ static void renderMemorySnippet(MemoryLocation Loc, } Diag::~Diag() { - bool UseAnsiColor = PrintsToTty(); - if (UseAnsiColor) - RawWrite("\033[1m"); + __sanitizer::AnsiColorDecorator Decor(PrintsToTty()); + SpinMutexLock l(&CommonSanitizerReportMutex); + Printf(Decor.Bold()); renderLocation(Loc); switch (Level) { case DL_Error: - if (UseAnsiColor) - RawWrite("\033[31m"); - RawWrite(" runtime error: "); - if (UseAnsiColor) - RawWrite("\033[0;1m"); + Printf("%s runtime error: %s%s", + Decor.Red(), Decor.Default(), Decor.Bold()); break; case DL_Note: - if (UseAnsiColor) - RawWrite("\033[30m"); - RawWrite(" note: "); - if (UseAnsiColor) - RawWrite("\033[0m"); + Printf("%s note: %s", Decor.Black(), Decor.Default()); break; } renderText(Message, Args); - if (UseAnsiColor) - RawWrite("\033[0m"); - - RawWrite("\n"); + Printf("%s\n", Decor.Default()); if (Loc.isMemoryLocation()) - renderMemorySnippet(Loc.getMemoryLocation(), Ranges, NumRanges, Args); + renderMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges, + NumRanges, Args); } diff --git a/lib/ubsan/ubsan_handlers.cc b/lib/ubsan/ubsan_handlers.cc index 1b02aa0fadf3..fa93b095d0bb 100644 --- a/lib/ubsan/ubsan_handlers.cc +++ b/lib/ubsan/ubsan_handlers.cc @@ -22,7 +22,7 @@ using namespace __ubsan; namespace __ubsan { const char *TypeCheckKinds[] = { "load of", "store to", "reference binding to", "member access within", - "member call on", "constructor call on" + "member call on", "constructor call on", "downcast of", "downcast of" }; } @@ -183,6 +183,22 @@ void __ubsan::__ubsan_handle_shift_out_of_bounds_abort( Die(); } +void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data, + ValueHandle Index) { + SourceLocation Loc = Data->Loc.acquire(); + if (Loc.isDisabled()) + return; + + Value IndexVal(Data->IndexType, Index); + Diag(Loc, DL_Error, "index %0 out of bounds for type %1") + << IndexVal << Data->ArrayType; +} +void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, + ValueHandle Index) { + __ubsan_handle_out_of_bounds(Data, Index); + Die(); +} + void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { Diag(Data->Loc, DL_Error, "execution reached a __builtin_unreachable() call"); Die(); diff --git a/lib/ubsan/ubsan_handlers.h b/lib/ubsan/ubsan_handlers.h index d6a042481ffa..5e237e1aa2de 100644 --- a/lib/ubsan/ubsan_handlers.h +++ b/lib/ubsan/ubsan_handlers.h @@ -67,6 +67,15 @@ struct ShiftOutOfBoundsData { RECOVERABLE(shift_out_of_bounds, ShiftOutOfBoundsData *Data, ValueHandle LHS, ValueHandle RHS) +struct OutOfBoundsData { + SourceLocation Loc; + const TypeDescriptor &ArrayType; + const TypeDescriptor 
&IndexType; +}; + +/// \brief Handle an array index out of bounds error. +RECOVERABLE(out_of_bounds, OutOfBoundsData *Data, ValueHandle Index) + struct UnreachableData { SourceLocation Loc; }; diff --git a/lib/ubsan/ubsan_handlers_cxx.cc b/lib/ubsan/ubsan_handlers_cxx.cc index dcc1f60078d2..b6cddefeb4f4 100644 --- a/lib/ubsan/ubsan_handlers_cxx.cc +++ b/lib/ubsan/ubsan_handlers_cxx.cc @@ -42,7 +42,6 @@ static void HandleDynamicTypeCacheMiss( << TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type; // If possible, say what type it actually points to. - // FIXME: Demangle the type names. DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer); if (!DTI.isValid()) Diag(Pointer, DL_Note, "object has invalid vptr") diff --git a/lib/ubsan/ubsan_type_hash.cc b/lib/ubsan/ubsan_type_hash.cc index 7a9cd28f6ec0..b27aefc16821 100644 --- a/lib/ubsan/ubsan_type_hash.cc +++ b/lib/ubsan/ubsan_type_hash.cc @@ -116,7 +116,7 @@ __ubsan::__ubsan_vptr_type_cache[__ubsan::VptrTypeCacheSize] = { 1 }; static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived, const abi::__class_type_info *Base, sptr Offset) { - if (Derived == Base) + if (Derived->__type_name == Base->__type_name) return Offset == 0; if (const abi::__si_class_type_info *SI = diff --git a/lib/ubsan/ubsan_value.cc b/lib/ubsan/ubsan_value.cc index f17c58989db9..5d77350d0c40 100644 --- a/lib/ubsan/ubsan_value.cc +++ b/lib/ubsan/ubsan_value.cc @@ -13,6 +13,8 @@ //===----------------------------------------------------------------------===// #include "ubsan_value.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" using namespace __ubsan; @@ -66,16 +68,34 @@ UIntMax Value::getPositiveIntValue() const { /// them to be passed in floating-point registers, so this has little cost). FloatMax Value::getFloatValue() const { CHECK(getType().isFloatTy()); - switch (getType().getFloatBitWidth()) { + if (isInlineFloat()) { + switch (getType().getFloatBitWidth()) { #if 0 - // FIXME: OpenCL / NEON 'half' type. LLVM can't lower the conversion - // from this to 'long double'. - case 16: return *reinterpret_cast<__fp16*>(Val); + // FIXME: OpenCL / NEON 'half' type. LLVM can't lower the conversion + // from '__fp16' to 'long double'. + case 16: { + __fp16 Value; + internal_memcpy(&Value, &Val, 4); + return Value; + } #endif - case 32: return *reinterpret_cast<float*>(Val); - case 64: return *reinterpret_cast<double*>(Val); - case 80: return *reinterpret_cast<long double*>(Val); - case 128: return *reinterpret_cast<long double*>(Val); + case 32: { + float Value; + internal_memcpy(&Value, &Val, 4); + return Value; + } + case 64: { + double Value; + internal_memcpy(&Value, &Val, 8); + return Value; + } + } + } else { + switch (getType().getFloatBitWidth()) { + case 64: return *reinterpret_cast<double*>(Val); + case 80: return *reinterpret_cast<long double*>(Val); + case 128: return *reinterpret_cast<long double*>(Val); + } } UNREACHABLE("unexpected floating point bit width"); } diff --git a/lib/ubsan/ubsan_value.h b/lib/ubsan/ubsan_value.h index e673f7af1d83..54ed5ad1931f 100644 --- a/lib/ubsan/ubsan_value.h +++ b/lib/ubsan/ubsan_value.h @@ -108,7 +108,8 @@ public: /// integer otherwise. TK_Integer = 0x0000, /// A floating-point type. Low 16 bits are bit width. The value - /// representation is a pointer to the floating-point value. + /// representation is that of bitcasting the floating-point value to an + /// integer type. TK_Float = 0x0001, /// Any other type. 
The value representation is unspecified. TK_Unknown = 0xffff @@ -162,6 +163,14 @@ class Value { return Bits <= InlineBits; } + /// Is \c Val a (zero-extended) integer representation of a float? + bool isInlineFloat() const { + CHECK(getType().isFloatTy()); + const unsigned InlineBits = sizeof(ValueHandle) * 8; + const unsigned Bits = getType().getFloatBitWidth(); + return Bits <= InlineBits; + } + public: Value(const TypeDescriptor &Type, ValueHandle Val) : Type(Type), Val(Val) {} diff --git a/make/AppleBI.mk b/make/AppleBI.mk index b5e702b10e66..d3d4771309c4 100644 --- a/make/AppleBI.mk +++ b/make/AppleBI.mk @@ -57,7 +57,14 @@ $(OBJROOT)/libcompiler_rt-%.dylib : $(OBJROOT)/darwin_bni/Release/%/libcompiler_ $(OBJROOT)/version.c -arch $* -dynamiclib \ -install_name /usr/lib/system/libcompiler_rt.dylib \ -compatibility_version 1 -current_version $(RC_ProjectSourceVersion) \ - -nodefaultlibs -lSystem -umbrella System -dead_strip \ + -nodefaultlibs -umbrella System -dead_strip \ + -Wl,-upward-lunwind \ + -Wl,-upward-lsystem_m \ + -Wl,-upward-lsystem_c \ + -Wl,-upward-lsystem_platform \ + -Wl,-ldyld \ + -Wl,-lsystem_kernel \ + -L$(SDKROOT)/usr/lib/system \ $(DYLIB_FLAGS) -Wl,-force_load,$^ -o $@ # Rule to make fat dylib diff --git a/make/platform/clang_darwin.mk b/make/platform/clang_darwin.mk index fe84a0565929..cb61744e5b7e 100644 --- a/make/platform/clang_darwin.mk +++ b/make/platform/clang_darwin.mk @@ -70,9 +70,6 @@ Configs += profile_ios UniversalArchs.profile_ios := $(call CheckArches,i386 x86_64 armv7,profile_ios) # Configurations which define the ASAN support functions. -Configs += asan_osx -UniversalArchs.asan_osx := $(call CheckArches,i386 x86_64,asan_osx) - Configs += asan_osx_dynamic UniversalArchs.asan_osx_dynamic := $(call CheckArches,i386 x86_64,asan_osx_dynamic) @@ -83,7 +80,7 @@ UniversalArchs.ubsan_osx := $(call CheckArches,i386 x86_64,ubsan_osx) # object files. If we are on that platform, strip out all ARM archs. We still # build the libraries themselves so that Clang can find them where it expects # them, even though they might not have an expected slice. -ifneq ($(shell sw_vers -productVersion | grep 10.6),) +ifneq ($(shell test -x /usr/bin/sw_vers && sw_vers -productVersion | grep 10.6),) UniversalArchs.ios := $(filter-out armv7, $(UniversalArchs.ios)) UniversalArchs.cc_kext := $(filter-out armv7, $(UniversalArchs.cc_kext)) UniversalArchs.cc_kext_ios5 := $(filter-out armv7, $(UniversalArchs.cc_kext_ios5)) @@ -116,9 +113,9 @@ CFLAGS := -Wall -Werror -O3 -fomit-frame-pointer # supported deployment target -- nothing in the compiler-rt libraries should # actually depend on the deployment target. OSX_DEPLOYMENT_ARGS := -mmacosx-version-min=10.4 -IOS_DEPLOYMENT_ARGS := -miphoneos-version-min=1.0 -IOS6_DEPLOYMENT_ARGS := -miphoneos-version-min=6.0 -IOSSIM_DEPLOYMENT_ARGS := -miphoneos-version-min=1.0 +IOS_DEPLOYMENT_ARGS := -mios-version-min=1.0 +IOS6_DEPLOYMENT_ARGS := -mios-version-min=6.0 +IOSSIM_DEPLOYMENT_ARGS := -mios-simulator-version-min=1.0 # Use our stub SDK as the sysroot to support more portable building. OSX_DEPLOYMENT_ARGS += -isysroot $(ProjSrcRoot)/SDKs/darwin @@ -129,10 +126,11 @@ IOSSIM_DEPLOYMENT_ARGS += -isysroot $(ProjSrcRoot)/SDKs/darwin CFLAGS.eprintf := $(CFLAGS) $(OSX_DEPLOYMENT_ARGS) CFLAGS.10.4 := $(CFLAGS) $(OSX_DEPLOYMENT_ARGS) # FIXME: We can't build ASAN with our stub SDK yet. 
-CFLAGS.asan_osx := $(CFLAGS) -mmacosx-version-min=10.5 -fno-builtin CFLAGS.asan_osx_dynamic := \ $(CFLAGS) -mmacosx-version-min=10.5 -fno-builtin \ - -DMAC_INTERPOSE_FUNCTIONS=1 + -gline-tables-only \ + -DMAC_INTERPOSE_FUNCTIONS=1 \ + -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=1 CFLAGS.ubsan_osx := $(CFLAGS) -mmacosx-version-min=10.5 -fno-builtin @@ -165,7 +163,7 @@ CFLAGS.profile_ios.armv7s := $(CFLAGS) $(IOS_DEPLOYMENT_ARGS) # Configure the asan_osx_dynamic library to be built shared. SHARED_LIBRARY.asan_osx_dynamic := 1 -LDFLAGS.asan_osx_dynamic := -framework Foundation -lstdc++ +LDFLAGS.asan_osx_dynamic := -framework Foundation -lstdc++ -undefined dynamic_lookup FUNCTIONS.eprintf := eprintf FUNCTIONS.10.4 := eprintf floatundidf floatundisf floatundixf @@ -182,13 +180,12 @@ FUNCTIONS.osx := mulosi4 mulodi4 muloti4 FUNCTIONS.profile_osx := GCDAProfiling FUNCTIONS.profile_ios := GCDAProfiling -FUNCTIONS.asan_osx := $(AsanFunctions) $(InterceptionFunctions) \ - $(SanitizerCommonFunctions) FUNCTIONS.asan_osx_dynamic := $(AsanFunctions) $(InterceptionFunctions) \ $(SanitizerCommonFunctions) \ $(AsanDynamicFunctions) -FUNCTIONS.ubsan_osx := $(UbsanFunctions) $(SanitizerCommonFunctions) +FUNCTIONS.ubsan_osx := $(UbsanFunctions) $(UbsanCXXFunctions) \ + $(SanitizerCommonFunctions) CCKEXT_COMMON_FUNCTIONS := \ absvdi2 \ diff --git a/make/platform/clang_linux.mk b/make/platform/clang_linux.mk index adfe8917de87..05efdb6d67ea 100644 --- a/make/platform/clang_linux.mk +++ b/make/platform/clang_linux.mk @@ -8,8 +8,8 @@ Configs := # We don't currently have any general purpose way to target architectures other # than the compiler defaults (because there is no generalized way to invoke -# cross compilers). For now, we just find the target archicture of the compiler -# and only define configurations we know that compiler can generate. +# cross compilers). For now, we just find the target architecture of the +# compiler and only define configurations we know that compiler can generate. CompilerTargetTriple := $(shell \ $(CC) -v 2>&1 | grep 'Target:' | cut -d' ' -f2) ifneq ($(DEBUGMAKE),) @@ -51,21 +51,27 @@ endif # Build runtime libraries for i386. ifeq ($(call contains,$(SupportedArches),i386),true) -Configs += full-i386 profile-i386 asan-i386 ubsan-i386 +Configs += full-i386 profile-i386 san-i386 asan-i386 ubsan-i386 ubsan_cxx-i386 Arch.full-i386 := i386 Arch.profile-i386 := i386 +Arch.san-i386 := i386 Arch.asan-i386 := i386 Arch.ubsan-i386 := i386 +Arch.ubsan_cxx-i386 := i386 endif # Build runtime libraries for x86_64. 
ifeq ($(call contains,$(SupportedArches),x86_64),true) -Configs += full-x86_64 profile-x86_64 asan-x86_64 tsan-x86_64 ubsan-x86_64 +Configs += full-x86_64 profile-x86_64 san-x86_64 asan-x86_64 tsan-x86_64 \ + msan-x86_64 ubsan-x86_64 ubsan_cxx-x86_64 Arch.full-x86_64 := x86_64 Arch.profile-x86_64 := x86_64 +Arch.san-x86_64 := x86_64 Arch.asan-x86_64 := x86_64 Arch.tsan-x86_64 := x86_64 +Arch.msan-x86_64 := x86_64 Arch.ubsan-x86_64 := x86_64 +Arch.ubsan_cxx-x86_64 := x86_64 endif ifneq ($(LLVM_ANDROID_TOOLCHAIN_DIR),) @@ -79,23 +85,31 @@ endif ### CFLAGS := -Wall -Werror -O3 -fomit-frame-pointer +SANITIZER_CFLAGS := -fPIE -fno-builtin -gline-tables-only CFLAGS.full-i386 := $(CFLAGS) -m32 CFLAGS.full-x86_64 := $(CFLAGS) -m64 CFLAGS.profile-i386 := $(CFLAGS) -m32 CFLAGS.profile-x86_64 := $(CFLAGS) -m64 -CFLAGS.asan-i386 := $(CFLAGS) -m32 -fPIE -fno-builtin -CFLAGS.asan-x86_64 := $(CFLAGS) -m64 -fPIE -fno-builtin -CFLAGS.tsan-x86_64 := $(CFLAGS) -m64 -fPIE -fno-builtin -CFLAGS.ubsan-i386 := $(CFLAGS) -m32 -fPIE -fno-builtin -CFLAGS.ubsan-x86_64 := $(CFLAGS) -m64 -fPIE -fno-builtin +CFLAGS.san-i386 := $(CFLAGS) -m32 $(SANITIZER_CFLAGS) -fno-rtti +CFLAGS.san-x86_64 := $(CFLAGS) -m64 $(SANITIZER_CFLAGS) -fno-rtti +CFLAGS.asan-i386 := $(CFLAGS) -m32 $(SANITIZER_CFLAGS) -fno-rtti \ + -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=1 +CFLAGS.asan-x86_64 := $(CFLAGS) -m64 $(SANITIZER_CFLAGS) -fno-rtti \ + -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=1 +CFLAGS.tsan-x86_64 := $(CFLAGS) -m64 $(SANITIZER_CFLAGS) -fno-rtti +CFLAGS.msan-x86_64 := $(CFLAGS) -m64 $(SANITIZER_CFLAGS) -fno-rtti +CFLAGS.ubsan-i386 := $(CFLAGS) -m32 $(SANITIZER_CFLAGS) -fno-rtti +CFLAGS.ubsan-x86_64 := $(CFLAGS) -m64 $(SANITIZER_CFLAGS) -fno-rtti +CFLAGS.ubsan_cxx-i386 := $(CFLAGS) -m32 $(SANITIZER_CFLAGS) +CFLAGS.ubsan_cxx-x86_64 := $(CFLAGS) -m64 $(SANITIZER_CFLAGS) SHARED_LIBRARY.asan-arm-android := 1 ANDROID_COMMON_FLAGS := -target arm-linux-androideabi \ --sysroot=$(LLVM_ANDROID_TOOLCHAIN_DIR)/sysroot \ -B$(LLVM_ANDROID_TOOLCHAIN_DIR) CFLAGS.asan-arm-android := $(CFLAGS) -fPIC -fno-builtin \ - $(ANDROID_COMMON_FLAGS) -mllvm -arm-enable-ehabi + $(ANDROID_COMMON_FLAGS) -mllvm -arm-enable-ehabi -fno-rtti LDFLAGS.asan-arm-android := $(LDFLAGS) $(ANDROID_COMMON_FLAGS) -ldl \ -Wl,-soname=libclang_rt.asan-arm-android.so @@ -111,16 +125,22 @@ FUNCTIONS.full-i386 := $(CommonFunctions) $(ArchFunctions.i386) FUNCTIONS.full-x86_64 := $(CommonFunctions) $(ArchFunctions.x86_64) FUNCTIONS.profile-i386 := GCDAProfiling FUNCTIONS.profile-x86_64 := GCDAProfiling +FUNCTIONS.san-i386 := $(SanitizerCommonFunctions) +FUNCTIONS.san-x86_64 := $(SanitizerCommonFunctions) FUNCTIONS.asan-i386 := $(AsanFunctions) $(InterceptionFunctions) \ $(SanitizerCommonFunctions) FUNCTIONS.asan-x86_64 := $(AsanFunctions) $(InterceptionFunctions) \ - $(SanitizerCommonFunctions) + $(SanitizerCommonFunctions) $(LsanCommonFunctions) FUNCTIONS.asan-arm-android := $(AsanFunctions) $(InterceptionFunctions) \ $(SanitizerCommonFunctions) FUNCTIONS.tsan-x86_64 := $(TsanFunctions) $(InterceptionFunctions) \ $(SanitizerCommonFunctions) -FUNCTIONS.ubsan-i386 := $(UbsanFunctions) $(SanitizerCommonFunctions) -FUNCTIONS.ubsan-x86_64 := $(UbsanFunctions) $(SanitizerCommonFunctions) +FUNCTIONS.msan-x86_64 := $(MsanFunctions) $(InterceptionFunctions) \ + $(SanitizerCommonFunctions) +FUNCTIONS.ubsan-i386 := $(UbsanFunctions) +FUNCTIONS.ubsan-x86_64 := $(UbsanFunctions) +FUNCTIONS.ubsan_cxx-i386 := $(UbsanCXXFunctions) +FUNCTIONS.ubsan_cxx-x86_64 := $(UbsanCXXFunctions) # Always use optimized variants. 
OPTIMIZED := 1 diff --git a/make/platform/darwin_bni.mk b/make/platform/darwin_bni.mk index d12cfdff7040..afd04313e62d 100644 --- a/make/platform/darwin_bni.mk +++ b/make/platform/darwin_bni.mk @@ -47,7 +47,7 @@ FUNCTIONS := absvdi2 absvsi2 addvdi3 addvsi3 ashldi3 ashrdi3 \ mulodi4 muloti4 mulsc3 mulvdi3 mulvsi3 negdi2 negvdi2 negvsi2 \ paritydi2 paritysi2 popcountdi2 popcountsi2 powidf2 \ powisf2 subvdi3 subvsi3 ucmpdi2 udivdi3 \ - udivmoddi4 umoddi3 apple_versioning eprintf + udivmoddi4 umoddi3 apple_versioning eprintf atomic FUNCTIONS.i386 := $(FUNCTIONS) \ divxc3 fixunsxfdi fixunsxfsi fixxfdi floatdixf \ diff --git a/test/timing/modsi3.c b/test/timing/modsi3.c new file mode 100644 index 000000000000..3275b8324474 --- /dev/null +++ b/test/timing/modsi3.c @@ -0,0 +1,52 @@ +#include "timing.h" +#include <stdio.h> + +#define INPUT_TYPE int32_t +#define INPUT_SIZE 256 +#define FUNCTION_NAME __modsi3 + +#ifndef LIBNAME +#define LIBNAME UNKNOWN +#endif + +#define LIBSTRING LIBSTRINGX(LIBNAME) +#define LIBSTRINGX(a) LIBSTRINGXX(a) +#define LIBSTRINGXX(a) #a + +INPUT_TYPE FUNCTION_NAME(INPUT_TYPE input1, INPUT_TYPE input2); + +int main(int argc, char *argv[]) { + INPUT_TYPE input1[INPUT_SIZE]; + INPUT_TYPE input2[INPUT_SIZE]; + int i, j; + + srand(42); + + // Initialize the input array with data of various sizes. + for (i=0; i<INPUT_SIZE; ++i) { + input1[i] = rand(); + input2[i] = rand() + 1; + } + + int64_t fixedInput = INT64_C(0x1234567890ABCDEF); + + double bestTime = __builtin_inf(); + void *dummyp; + for (j=0; j<1024; ++j) { + + uint64_t startTime = mach_absolute_time(); + for (i=0; i<INPUT_SIZE; ++i) + FUNCTION_NAME(input1[i], input2[i]); + uint64_t endTime = mach_absolute_time(); + + double thisTime = intervalInCycles(startTime, endTime); + bestTime = __builtin_fmin(thisTime, bestTime); + + // Move the stack alignment between trials to eliminate (mostly) aliasing effects + dummyp = alloca(1); + } + + printf("%16s: %f cycles.\n", LIBSTRING, bestTime / (double) INPUT_SIZE); + + return 0; +} |