diff options
author | Dimitry Andric <dim@FreeBSD.org> | 2018-07-28 11:06:48 +0000 |
---|---|---|
committer | Dimitry Andric <dim@FreeBSD.org> | 2018-07-28 11:06:48 +0000 |
commit | 93c1b73a09a52d4a265f683bf1954b08bb430049 (patch) | |
tree | 5543464d74945196cc890e9d9099e5d0660df7eb /lib | |
parent | 0d8e7490d6e8a13a8f0977d9b7771803b9f64ea0 (diff) | |
download | src-93c1b73a09a52d4a265f683bf1954b08bb430049.tar.gz src-93c1b73a09a52d4a265f683bf1954b08bb430049.zip |
Notes
Diffstat (limited to 'lib')
411 files changed, 30548 insertions, 5156 deletions
diff --git a/lib/asan/.clang-format b/lib/asan/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/asan/.clang-format +++ b/lib/asan/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt index fbd72f69298f..2ae5c85ecefb 100644 --- a/lib/asan/CMakeLists.txt +++ b/lib/asan/CMakeLists.txt @@ -23,6 +23,7 @@ set(ASAN_SOURCES asan_posix.cc asan_premap_shadow.cc asan_report.cc + asan_rtems.cc asan_rtl.cc asan_shadow_setup.cc asan_stack.cc @@ -37,6 +38,34 @@ set(ASAN_CXX_SOURCES set(ASAN_PREINIT_SOURCES asan_preinit.cc) +SET(ASAN_HEADERS + asan_activation.h + asan_activation_flags.inc + asan_allocator.h + asan_descriptions.h + asan_errors.h + asan_fake_stack.h + asan_flags.h + asan_flags.inc + asan_init_version.h + asan_interceptors.h + asan_interceptors_memintrinsics.h + asan_interface.inc + asan_interface_internal.h + asan_internal.h + asan_lock.h + asan_malloc_local.h + asan_mapping.h + asan_mapping_myriad.h + asan_poisoning.h + asan_premap_shadow.h + asan_report.h + asan_scariness_score.h + asan_stack.h + asan_stats.h + asan_suppressions.h + asan_thread.h) + include_directories(..) set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS}) @@ -46,20 +75,6 @@ append_rtti_flag(OFF ASAN_CFLAGS) set(ASAN_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS}) -if(ANDROID) -# On Android, -z global does not do what it is documented to do. -# On Android, -z global moves the library ahead in the lookup order, -# placing it right after the LD_PRELOADs. This is used to compensate for the fact -# that Android linker does not look at the dependencies of the main executable -# that aren't dependencies of the current DSO when resolving symbols from said DSO. -# As a net result, this allows running ASan executables without LD_PRELOAD-ing the -# ASan runtime library. -# The above is applicable to L MR1 or newer. 
- if (COMPILER_RT_HAS_Z_GLOBAL) - list(APPEND ASAN_DYNAMIC_LINK_FLAGS -Wl,-z,global) - endif() -endif() - set(ASAN_DYNAMIC_DEFINITIONS ${ASAN_COMMON_DEFINITIONS} ASAN_DYNAMIC=1) append_list_if(WIN32 INTERCEPTION_DYNAMIC_CRT ASAN_DYNAMIC_DEFINITIONS) @@ -83,21 +98,28 @@ add_compiler_rt_object_libraries(RTAsan_dynamic OS ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${ASAN_SUPPORTED_ARCH} SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES} + ADDITIONAL_HEADERS ${ASAN_HEADERS} CFLAGS ${ASAN_DYNAMIC_CFLAGS} DEFS ${ASAN_DYNAMIC_DEFINITIONS}) if(NOT APPLE) add_compiler_rt_object_libraries(RTAsan ARCHS ${ASAN_SUPPORTED_ARCH} - SOURCES ${ASAN_SOURCES} CFLAGS ${ASAN_CFLAGS} + SOURCES ${ASAN_SOURCES} + ADDITIONAL_HEADERS ${ASAN_HEADERS} + CFLAGS ${ASAN_CFLAGS} DEFS ${ASAN_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTAsan_cxx ARCHS ${ASAN_SUPPORTED_ARCH} - SOURCES ${ASAN_CXX_SOURCES} CFLAGS ${ASAN_CFLAGS} + SOURCES ${ASAN_CXX_SOURCES} + ADDITIONAL_HEADERS ${ASAN_HEADERS} + CFLAGS ${ASAN_CFLAGS} DEFS ${ASAN_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTAsan_preinit ARCHS ${ASAN_SUPPORTED_ARCH} - SOURCES ${ASAN_PREINIT_SOURCES} CFLAGS ${ASAN_CFLAGS} + SOURCES ${ASAN_PREINIT_SOURCES} + ADDITIONAL_HEADERS ${ASAN_HEADERS} + CFLAGS ${ASAN_CFLAGS} DEFS ${ASAN_COMMON_DEFINITIONS}) file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy.cc "") @@ -125,6 +147,8 @@ if(APPLE) RTInterception RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer RTLSanCommon RTUbsan CFLAGS ${ASAN_DYNAMIC_CFLAGS} @@ -138,6 +162,8 @@ else() RTInterception RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer RTLSanCommon RTUbsan) @@ -223,7 +249,7 @@ else() DEFS ${ASAN_DYNAMIC_DEFINITIONS} PARENT_TARGET asan) - if (UNIX AND NOT ${arch} STREQUAL "i386") + if (SANITIZER_USE_SYMBOLS AND NOT ${arch} STREQUAL "i386") add_sanitizer_rt_symbols(clang_rt.asan_cxx ARCHS ${arch}) add_dependencies(asan clang_rt.asan_cxx-${arch}-symbols) 
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc index a437ae1cd3b1..c0fad4fa042b 100644 --- a/lib/asan/asan_allocator.cc +++ b/lib/asan/asan_allocator.cc @@ -134,8 +134,9 @@ struct AsanChunk: ChunkBase { }; struct QuarantineCallback { - explicit QuarantineCallback(AllocatorCache *cache) - : cache_(cache) { + QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack) + : cache_(cache), + stack_(stack) { } void Recycle(AsanChunk *m) { @@ -168,7 +169,7 @@ struct QuarantineCallback { void *res = get_allocator().Allocate(cache_, size, 1); // TODO(alekseys): Consider making quarantine OOM-friendly. if (UNLIKELY(!res)) - return DieOnFailure::OnOOM(); + ReportOutOfMemory(size, stack_); return res; } @@ -176,7 +177,9 @@ struct QuarantineCallback { get_allocator().Deallocate(cache_, p); } - AllocatorCache *cache_; + private: + AllocatorCache* const cache_; + BufferedStackTrace* const stack_; }; typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; @@ -397,8 +400,11 @@ struct Allocator { AllocType alloc_type, bool can_fill) { if (UNLIKELY(!asan_inited)) AsanInitFromRtl(); - if (RssLimitExceeded()) - return AsanAllocator::FailureHandler::OnOOM(); + if (RssLimitExceeded()) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportRssLimitExceeded(stack); + } Flags &fl = *flags(); CHECK(stack); const uptr min_alignment = SHADOW_GRANULARITY; @@ -431,9 +437,13 @@ struct Allocator { } CHECK(IsAligned(needed_size, min_alignment)); if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { - Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", - (void*)size); - return AsanAllocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) { + Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", + (void*)size); + return nullptr; + } + ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize, + stack); } AsanThread *t = GetCurrentThread(); @@ -446,8 +456,12 @@ struct Allocator 
{ AllocatorCache *cache = &fallback_allocator_cache; allocated = allocator.Allocate(cache, needed_size, 8); } - if (!allocated) - return nullptr; + if (UNLIKELY(!allocated)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, stack); + } if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) { // Heap poisoning is enabled, but the allocator provides an unpoisoned @@ -583,13 +597,13 @@ struct Allocator { if (t) { AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); AllocatorCache *ac = GetAllocatorCache(ms); - quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m, - m->UsedSize()); + quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m, + m->UsedSize()); } else { SpinMutexLock l(&fallback_mutex); AllocatorCache *ac = &fallback_allocator_cache; - quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m, - m->UsedSize()); + quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack), + m, m->UsedSize()); } } @@ -660,8 +674,11 @@ struct Allocator { } void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { - if (CheckForCallocOverflow(size, nmemb)) - return AsanAllocator::FailureHandler::OnBadRequest(); + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, stack); + } void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); // If the memory comes from the secondary allocator no need to clear it // as it comes directly from mmap. 
@@ -677,9 +694,9 @@ struct Allocator { ReportFreeNotMalloced((uptr)ptr, stack); } - void CommitBack(AsanThreadLocalMallocStorage *ms) { + void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) { AllocatorCache *ac = GetAllocatorCache(ms); - quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac)); + quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack)); allocator.SwallowCache(ac); } @@ -739,17 +756,19 @@ struct Allocator { return AsanChunkView(m1); } - void Purge() { + void Purge(BufferedStackTrace *stack) { AsanThread *t = GetCurrentThread(); if (t) { AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); quarantine.DrainAndRecycle(GetQuarantineCache(ms), - QuarantineCallback(GetAllocatorCache(ms))); + QuarantineCallback(GetAllocatorCache(ms), + stack)); } { SpinMutexLock l(&fallback_mutex); quarantine.DrainAndRecycle(&fallback_quarantine_cache, - QuarantineCallback(&fallback_allocator_cache)); + QuarantineCallback(&fallback_allocator_cache, + stack)); } allocator.ForceReleaseToOS(); @@ -836,7 +855,8 @@ AsanChunkView FindHeapChunkByAllocBeg(uptr addr) { } void AsanThreadLocalMallocStorage::CommitBack() { - instance.CommitBack(this); + GET_STACK_TRACE_MALLOC; + instance.CommitBack(this, &stack); } void PrintInternalAllocatorStats() { @@ -883,7 +903,9 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { uptr PageSize = GetPageSizeCached(); if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { errno = errno_ENOMEM; - return AsanAllocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportPvallocOverflow(size, stack); } // pvalloc(0) should allocate one page. size = size ? 
RoundUpTo(size, PageSize) : PageSize; @@ -895,20 +917,35 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, AllocType alloc_type) { if (UNLIKELY(!IsPowerOfTwo(alignment))) { errno = errno_EINVAL; - return AsanAllocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, stack); } return SetErrnoOnNull( instance.Allocate(size, alignment, stack, alloc_type, true)); } +void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, stack); + } + return SetErrnoOnNull( + instance.Allocate(size, alignment, stack, FROM_MALLOC, true)); +} + int asan_posix_memalign(void **memptr, uptr alignment, uptr size, BufferedStackTrace *stack) { if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { - AsanAllocator::FailureHandler::OnBadRequest(); - return errno_EINVAL; + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, stack); } void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); if (UNLIKELY(!ptr)) + // OOM error is already taken care of by Allocate. return errno_ENOMEM; CHECK(IsAligned((uptr)ptr, alignment)); *memptr = ptr; @@ -1054,7 +1091,8 @@ uptr __sanitizer_get_allocated_size(const void *p) { } void __sanitizer_purge_allocator() { - instance.Purge(); + GET_STACK_TRACE_MALLOC; + instance.Purge(&stack); } #if !SANITIZER_SUPPORTS_WEAK_HOOKS diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h index 26483db4c60e..93d6f29c5bf5 100644 --- a/lib/asan/asan_allocator.h +++ b/lib/asan/asan_allocator.h @@ -125,11 +125,12 @@ const uptr kAllocatorSpace = ~(uptr)0; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. 
typedef DefaultSizeClassMap SizeClassMap; # elif defined(__powerpc64__) -const uptr kAllocatorSpace = 0xa0000000000ULL; +const uptr kAllocatorSpace = ~(uptr)0; const uptr kAllocatorSize = 0x20000000000ULL; // 2T. typedef DefaultSizeClassMap SizeClassMap; # elif defined(__aarch64__) && SANITIZER_ANDROID -const uptr kAllocatorSpace = 0x3000000000ULL; +// Android needs to support 39, 42 and 48 bit VMA. +const uptr kAllocatorSpace = ~(uptr)0; const uptr kAllocatorSize = 0x2000000000ULL; // 128G. typedef VeryCompactSizeClassMap SizeClassMap; # elif defined(__aarch64__) @@ -207,6 +208,7 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack); void *asan_valloc(uptr size, BufferedStackTrace *stack); void *asan_pvalloc(uptr size, BufferedStackTrace *stack); +void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack); int asan_posix_memalign(void **memptr, uptr alignment, uptr size, BufferedStackTrace *stack); uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp); diff --git a/lib/asan/asan_debugging.cc b/lib/asan/asan_debugging.cc index 37c5922dc0fc..7c877ded30cf 100644 --- a/lib/asan/asan_debugging.cc +++ b/lib/asan/asan_debugging.cc @@ -27,7 +27,8 @@ using namespace __asan; static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset, char *name, uptr name_size, uptr ®ion_address, uptr ®ion_size) { - InternalMmapVector<StackVarDescr> vars(16); + InternalMmapVector<StackVarDescr> vars; + vars.reserve(16); if (!ParseFrameDescription(frame_descr, &vars)) { return; } diff --git a/lib/asan/asan_descriptions.cc b/lib/asan/asan_descriptions.cc index 86c6af7d9f75..cdb562d975ff 100644 --- a/lib/asan/asan_descriptions.cc +++ b/lib/asan/asan_descriptions.cc @@ -20,23 +20,25 @@ namespace __asan { -// Return " (thread_name) " or an empty string if the name is empty. 
-const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[], - uptr buff_len) { - const char *name = t->name; - if (name[0] == '\0') return ""; - buff[0] = 0; - internal_strncat(buff, " (", 3); - internal_strncat(buff, name, buff_len - 4); - internal_strncat(buff, ")", 2); - return buff; +AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) { + Init(t->tid, t->name); } -const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) { - if (tid == kInvalidTid) return ""; - asanThreadRegistry().CheckLocked(); - AsanThreadContext *t = GetThreadContextByTidLocked(tid); - return ThreadNameWithParenthesis(t, buff, buff_len); +AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) { + if (tid == kInvalidTid) { + Init(tid, ""); + } else { + asanThreadRegistry().CheckLocked(); + AsanThreadContext *t = GetThreadContextByTidLocked(tid); + Init(tid, t->name); + } +} + +void AsanThreadIdAndName::Init(u32 tid, const char *tname) { + int len = internal_snprintf(name, sizeof(name), "T%d", tid); + CHECK(((unsigned int)len) < sizeof(name)); + if (tname[0] != '\0') + internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname); } void DescribeThread(AsanThreadContext *context) { @@ -47,18 +49,15 @@ void DescribeThread(AsanThreadContext *context) { return; } context->announced = true; - char tname[128]; InternalScopedString str(1024); - str.append("Thread T%d%s", context->tid, - ThreadNameWithParenthesis(context->tid, tname, sizeof(tname))); + str.append("Thread %s", AsanThreadIdAndName(context).c_str()); if (context->parent_tid == kInvalidTid) { str.append(" created by unknown thread\n"); Printf("%s", str.data()); return; } - str.append( - " created by T%d%s here:\n", context->parent_tid, - ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname))); + str.append(" created by %s here:\n", + AsanThreadIdAndName(context->parent_tid).c_str()); Printf("%s", str.data()); StackDepotGet(context->stack_id).Print(); // Recursively described parent 
thread if needed. @@ -358,10 +357,9 @@ bool GlobalAddressDescription::PointsInsideTheSameVariable( void StackAddressDescription::Print() const { Decorator d; - char tname[128]; Printf("%s", d.Location()); - Printf("Address %p is located in stack of thread T%d%s", addr, tid, - ThreadNameWithParenthesis(tid, tname, sizeof(tname))); + Printf("Address %p is located in stack of thread %s", addr, + AsanThreadIdAndName(tid).c_str()); if (!frame_descr) { Printf("%s\n", d.Default()); @@ -380,7 +378,8 @@ void StackAddressDescription::Print() const { StackTrace alloca_stack(&frame_pc, 1); alloca_stack.Print(); - InternalMmapVector<StackVarDescr> vars(16); + InternalMmapVector<StackVarDescr> vars; + vars.reserve(16); if (!ParseFrameDescription(frame_descr, &vars)) { Printf( "AddressSanitizer can't parse the stack frame " @@ -402,7 +401,7 @@ void StackAddressDescription::Print() const { } Printf( "HINT: this may be a false positive if your program uses " - "some custom stack unwind mechanism or swapcontext\n"); + "some custom stack unwind mechanism, swapcontext or vfork\n"); if (SANITIZER_WINDOWS) Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n"); else @@ -418,26 +417,19 @@ void HeapAddressDescription::Print() const { AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid); StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id); - char tname[128]; Decorator d; AsanThreadContext *free_thread = nullptr; if (free_tid != kInvalidTid) { free_thread = GetThreadContextByTidLocked(free_tid); - Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(), - free_thread->tid, - ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)), - d.Default()); + Printf("%sfreed by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(free_thread).c_str(), d.Default()); StackTrace free_stack = GetStackTraceFromId(free_stack_id); free_stack.Print(); - Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(), - alloc_thread->tid, - 
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)), - d.Default()); + Printf("%spreviously allocated by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); } else { - Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(), - alloc_thread->tid, - ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)), - d.Default()); + Printf("%sallocated by thread %s here:%s\n", d.Allocation(), + AsanThreadIdAndName(alloc_thread).c_str(), d.Default()); } alloc_stack.Print(); DescribeThread(GetCurrentThread()); diff --git a/lib/asan/asan_descriptions.h b/lib/asan/asan_descriptions.h index 1a1b01cf20cd..5c2d7662a35a 100644 --- a/lib/asan/asan_descriptions.h +++ b/lib/asan/asan_descriptions.h @@ -26,9 +26,20 @@ void DescribeThread(AsanThreadContext *context); static inline void DescribeThread(AsanThread *t) { if (t) DescribeThread(t->context()); } -const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[], - uptr buff_len); -const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len); + +class AsanThreadIdAndName { + public: + explicit AsanThreadIdAndName(AsanThreadContext *t); + explicit AsanThreadIdAndName(u32 tid); + + // Contains "T%tid (%name)" or "T%tid" if the name is empty. 
+ const char *c_str() const { return &name[0]; } + + private: + void Init(u32 tid, const char *tname); + + char name[128]; +}; class Decorator : public __sanitizer::SanitizerCommonDecorator { public: diff --git a/lib/asan/asan_errors.cc b/lib/asan/asan_errors.cc index 0f4a3abff9a7..33d0613f79f4 100644 --- a/lib/asan/asan_errors.cc +++ b/lib/asan/asan_errors.cc @@ -45,13 +45,11 @@ void ErrorDeadlySignal::Print() { void ErrorDoubleFree::Print() { Decorator d; - Printf("%s", d.Warning()); - char tname[128]; + Printf("%s", d.Error()); Report( - "ERROR: AddressSanitizer: attempting %s on %p in " - "thread T%d%s:\n", - scariness.GetDescription(), addr_description.addr, tid, - ThreadNameWithParenthesis(tid, tname, sizeof(tname))); + "ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n", + scariness.GetDescription(), addr_description.addr, + AsanThreadIdAndName(tid).c_str()); Printf("%s", d.Default()); scariness.Print(); GET_STACK_TRACE_FATAL(second_free_stack->trace[0], @@ -63,13 +61,11 @@ void ErrorDoubleFree::Print() { void ErrorNewDeleteTypeMismatch::Print() { Decorator d; - Printf("%s", d.Warning()); - char tname[128]; + Printf("%s", d.Error()); Report( - "ERROR: AddressSanitizer: %s on %p in thread " - "T%d%s:\n", - scariness.GetDescription(), addr_description.addr, tid, - ThreadNameWithParenthesis(tid, tname, sizeof(tname))); + "ERROR: AddressSanitizer: %s on %p in thread %s:\n", + scariness.GetDescription(), addr_description.addr, + AsanThreadIdAndName(tid).c_str()); Printf("%s object passed to delete has wrong type:\n", d.Default()); if (delete_size != 0) { Printf( @@ -106,13 +102,11 @@ void ErrorNewDeleteTypeMismatch::Print() { void ErrorFreeNotMalloced::Print() { Decorator d; - Printf("%s", d.Warning()); - char tname[128]; + Printf("%s", d.Error()); Report( "ERROR: AddressSanitizer: attempting free on address " - "which was not malloc()-ed: %p in thread T%d%s\n", - addr_description.Address(), tid, - ThreadNameWithParenthesis(tid, tname, sizeof(tname))); 
+ "which was not malloc()-ed: %p in thread %s\n", + addr_description.Address(), AsanThreadIdAndName(tid).c_str()); Printf("%s", d.Default()); CHECK_GT(free_stack->size, 0); scariness.Print(); @@ -129,7 +123,7 @@ void ErrorAllocTypeMismatch::Print() { "operator delete []"}; CHECK_NE(alloc_type, dealloc_type); Decorator d; - Printf("%s", d.Warning()); + Printf("%s", d.Error()); Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n", scariness.GetDescription(), alloc_names[alloc_type], dealloc_names[dealloc_type], @@ -148,7 +142,7 @@ void ErrorAllocTypeMismatch::Print() { void ErrorMallocUsableSizeNotOwned::Print() { Decorator d; - Printf("%s", d.Warning()); + Printf("%s", d.Error()); Report( "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for " "pointer which is not owned: %p\n", @@ -161,7 +155,7 @@ void ErrorMallocUsableSizeNotOwned::Print() { void ErrorSanitizerGetAllocatedSizeNotOwned::Print() { Decorator d; - Printf("%s", d.Warning()); + Printf("%s", d.Error()); Report( "ERROR: AddressSanitizer: attempting to call " "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n", @@ -172,11 +166,123 @@ void ErrorSanitizerGetAllocatedSizeNotOwned::Print() { ReportErrorSummary(scariness.GetDescription(), stack); } +void ErrorCallocOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: calloc parameters overflow: count * size " + "(%zd * %zd) cannot be represented in type size_t (thread %s)\n", + count, size, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorPvallocOverflow::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: pvalloc parameters overflow: size 0x%zx " + "rounded up to system page size 0x%zx cannot be represented in type " + "size_t (thread %s)\n", + size, GetPageSizeCached(), 
AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidAllocationAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: invalid allocation alignment: %zd, " + "alignment must be a power of two (thread %s)\n", + alignment, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidAlignedAllocAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); +#if SANITIZER_POSIX + Report("ERROR: AddressSanitizer: invalid alignment requested in " + "aligned_alloc: %zd, alignment must be a power of two and the " + "requested size 0x%zx must be a multiple of alignment " + "(thread %s)\n", alignment, size, AsanThreadIdAndName(tid).c_str()); +#else + Report("ERROR: AddressSanitizer: invalid alignment requested in " + "aligned_alloc: %zd, the requested size 0x%zx must be a multiple of " + "alignment (thread %s)\n", alignment, size, + AsanThreadIdAndName(tid).c_str()); +#endif + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorInvalidPosixMemalignAlignment::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: invalid alignment requested in posix_memalign: " + "%zd, alignment must be a power of two and a multiple of sizeof(void*) " + "== %zd (thread %s)\n", + alignment, sizeof(void*), AsanThreadIdAndName(tid).c_str()); // NOLINT + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorAllocationSizeTooBig::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: requested 
allocation size 0x%zx (0x%zx after " + "adjustments for alignment, red zones etc.) exceeds maximum supported " + "size of 0x%zx (thread %s)\n", + user_size, total_size, max_size, AsanThreadIdAndName(tid).c_str()); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorRssLimitExceeded::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: specified RSS limit exceeded, currently set to " + "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + +void ErrorOutOfMemory::Print() { + Decorator d; + Printf("%s", d.Error()); + Report( + "ERROR: AddressSanitizer: allocator is out of memory trying to allocate " + "0x%zx bytes\n", requested_size); + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(scariness.GetDescription(), stack); +} + void ErrorStringFunctionMemoryRangesOverlap::Print() { Decorator d; char bug_type[100]; internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function); - Printf("%s", d.Warning()); + Printf("%s", d.Error()); Report( "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) " "overlap\n", @@ -193,7 +299,7 @@ void ErrorStringFunctionMemoryRangesOverlap::Print() { void ErrorStringFunctionSizeOverflow::Print() { Decorator d; - Printf("%s", d.Warning()); + Printf("%s", d.Error()); Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", scariness.GetDescription(), size); Printf("%s", d.Default()); @@ -221,7 +327,7 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() { void ErrorODRViolation::Print() { Decorator d; - Printf("%s", d.Warning()); + Printf("%s", d.Error()); Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(), global1.beg); Printf("%s", 
d.Default()); @@ -250,7 +356,7 @@ void ErrorODRViolation::Print() { void ErrorInvalidPointerPair::Print() { Decorator d; - Printf("%s", d.Warning()); + Printf("%s", d.Error()); Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(), addr1_description.Address(), addr2_description.Address()); Printf("%s", d.Default()); @@ -414,6 +520,7 @@ static void PrintLegend(InternalScopedString *str) { PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); + PrintShadowByte(str, " Shadow gap: ", kAsanShadowGap); } static void PrintShadowBytes(InternalScopedString *str, const char *before, @@ -453,17 +560,15 @@ static void PrintShadowMemoryForAddress(uptr addr) { void ErrorGeneric::Print() { Decorator d; - Printf("%s", d.Warning()); + Printf("%s", d.Error()); uptr addr = addr_description.Address(); Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n", bug_descr, (void *)addr, pc, bp, sp); Printf("%s", d.Default()); - char tname[128]; - Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(), + Printf("%s%s of size %zu at %p thread %s%s\n", d.Access(), access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size, - (void *)addr, tid, - ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.Default()); + (void *)addr, AsanThreadIdAndName(tid).c_str(), d.Default()); scariness.Print(); GET_STACK_TRACE_FATAL(pc, bp); diff --git a/lib/asan/asan_errors.h b/lib/asan/asan_errors.h index 518ba0c6945e..574197ebff86 100644 --- a/lib/asan/asan_errors.h +++ b/lib/asan/asan_errors.h @@ -20,20 +20,30 @@ namespace __asan { +// (*) VS2013 does not implement unrestricted unions, so we need a trivial +// default constructor explicitly defined for each particular error. + +// None of the error classes own the stack traces mentioned in them. 
+ struct ErrorBase { - ErrorBase() = default; - explicit ErrorBase(u32 tid_) : tid(tid_) {} ScarinessScoreBase scariness; u32 tid; + + ErrorBase() = default; // (*) + explicit ErrorBase(u32 tid_) : tid(tid_) {} + ErrorBase(u32 tid_, int initial_score, const char *reason) : tid(tid_) { + scariness.Clear(); + scariness.Scare(initial_score, reason); + } }; struct ErrorDeadlySignal : ErrorBase { SignalContext signal; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorDeadlySignal() = default; + + ErrorDeadlySignal() = default; // (*) ErrorDeadlySignal(u32 tid, const SignalContext &sig) - : ErrorBase(tid), signal(sig) { + : ErrorBase(tid), + signal(sig) { scariness.Clear(); if (signal.IsStackOverflow()) { scariness.Scare(10, "stack-overflow"); @@ -55,125 +65,206 @@ struct ErrorDeadlySignal : ErrorBase { }; struct ErrorDoubleFree : ErrorBase { - // ErrorDoubleFree doesn't own the stack trace. const BufferedStackTrace *second_free_stack; HeapAddressDescription addr_description; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorDoubleFree() = default; + + ErrorDoubleFree() = default; // (*) ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr) - : ErrorBase(tid), second_free_stack(stack) { + : ErrorBase(tid, 42, "double-free"), + second_free_stack(stack) { CHECK_GT(second_free_stack->size, 0); GetHeapAddressInformation(addr, 1, &addr_description); - scariness.Clear(); - scariness.Scare(42, "double-free"); } void Print(); }; struct ErrorNewDeleteTypeMismatch : ErrorBase { - // ErrorNewDeleteTypeMismatch doesn't own the stack trace. 
const BufferedStackTrace *free_stack; HeapAddressDescription addr_description; uptr delete_size; uptr delete_alignment; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorNewDeleteTypeMismatch() = default; + + ErrorNewDeleteTypeMismatch() = default; // (*) ErrorNewDeleteTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr, uptr delete_size_, uptr delete_alignment_) - : ErrorBase(tid), free_stack(stack), delete_size(delete_size_), + : ErrorBase(tid, 10, "new-delete-type-mismatch"), + free_stack(stack), + delete_size(delete_size_), delete_alignment(delete_alignment_) { GetHeapAddressInformation(addr, 1, &addr_description); - scariness.Clear(); - scariness.Scare(10, "new-delete-type-mismatch"); } void Print(); }; struct ErrorFreeNotMalloced : ErrorBase { - // ErrorFreeNotMalloced doesn't own the stack trace. const BufferedStackTrace *free_stack; AddressDescription addr_description; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorFreeNotMalloced() = default; + + ErrorFreeNotMalloced() = default; // (*) ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr) - : ErrorBase(tid), + : ErrorBase(tid, 40, "bad-free"), free_stack(stack), - addr_description(addr, /*shouldLockThreadRegistry=*/false) { - scariness.Clear(); - scariness.Scare(40, "bad-free"); - } + addr_description(addr, /*shouldLockThreadRegistry=*/false) {} void Print(); }; struct ErrorAllocTypeMismatch : ErrorBase { - // ErrorAllocTypeMismatch doesn't own the stack trace. 
const BufferedStackTrace *dealloc_stack; HeapAddressDescription addr_description; AllocType alloc_type, dealloc_type; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorAllocTypeMismatch() = default; + + ErrorAllocTypeMismatch() = default; // (*) ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr, AllocType alloc_type_, AllocType dealloc_type_) - : ErrorBase(tid), + : ErrorBase(tid, 10, "alloc-dealloc-mismatch"), dealloc_stack(stack), alloc_type(alloc_type_), dealloc_type(dealloc_type_) { GetHeapAddressInformation(addr, 1, &addr_description); - scariness.Clear(); - scariness.Scare(10, "alloc-dealloc-mismatch"); }; void Print(); }; struct ErrorMallocUsableSizeNotOwned : ErrorBase { - // ErrorMallocUsableSizeNotOwned doesn't own the stack trace. const BufferedStackTrace *stack; AddressDescription addr_description; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorMallocUsableSizeNotOwned() = default; + + ErrorMallocUsableSizeNotOwned() = default; // (*) ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr) - : ErrorBase(tid), + : ErrorBase(tid, 10, "bad-malloc_usable_size"), stack(stack_), - addr_description(addr, /*shouldLockThreadRegistry=*/false) { - scariness.Clear(); - scariness.Scare(10, "bad-malloc_usable_size"); - } + addr_description(addr, /*shouldLockThreadRegistry=*/false) {} void Print(); }; struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase { - // ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace. 
const BufferedStackTrace *stack; AddressDescription addr_description; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorSanitizerGetAllocatedSizeNotOwned() = default; + + ErrorSanitizerGetAllocatedSizeNotOwned() = default; // (*) ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr) - : ErrorBase(tid), + : ErrorBase(tid, 10, "bad-__sanitizer_get_allocated_size"), stack(stack_), - addr_description(addr, /*shouldLockThreadRegistry=*/false) { - scariness.Clear(); - scariness.Scare(10, "bad-__sanitizer_get_allocated_size"); - } + addr_description(addr, /*shouldLockThreadRegistry=*/false) {} + void Print(); +}; + +struct ErrorCallocOverflow : ErrorBase { + const BufferedStackTrace *stack; + uptr count; + uptr size; + + ErrorCallocOverflow() = default; // (*) + ErrorCallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr count_, + uptr size_) + : ErrorBase(tid, 10, "calloc-overflow"), + stack(stack_), + count(count_), + size(size_) {} + void Print(); +}; + +struct ErrorPvallocOverflow : ErrorBase { + const BufferedStackTrace *stack; + uptr size; + + ErrorPvallocOverflow() = default; // (*) + ErrorPvallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr size_) + : ErrorBase(tid, 10, "pvalloc-overflow"), + stack(stack_), + size(size_) {} + void Print(); +}; + +struct ErrorInvalidAllocationAlignment : ErrorBase { + const BufferedStackTrace *stack; + uptr alignment; + + ErrorInvalidAllocationAlignment() = default; // (*) + ErrorInvalidAllocationAlignment(u32 tid, BufferedStackTrace *stack_, + uptr alignment_) + : ErrorBase(tid, 10, "invalid-allocation-alignment"), + stack(stack_), + alignment(alignment_) {} + void Print(); +}; + +struct ErrorInvalidAlignedAllocAlignment : ErrorBase { + const BufferedStackTrace *stack; + uptr size; + uptr alignment; + + ErrorInvalidAlignedAllocAlignment() = default; // (*) + ErrorInvalidAlignedAllocAlignment(u32 tid, BufferedStackTrace *stack_, + uptr 
size_, uptr alignment_) + : ErrorBase(tid, 10, "invalid-aligned-alloc-alignment"), + stack(stack_), + size(size_), + alignment(alignment_) {} + void Print(); +}; + +struct ErrorInvalidPosixMemalignAlignment : ErrorBase { + const BufferedStackTrace *stack; + uptr alignment; + + ErrorInvalidPosixMemalignAlignment() = default; // (*) + ErrorInvalidPosixMemalignAlignment(u32 tid, BufferedStackTrace *stack_, + uptr alignment_) + : ErrorBase(tid, 10, "invalid-posix-memalign-alignment"), + stack(stack_), + alignment(alignment_) {} + void Print(); +}; + +struct ErrorAllocationSizeTooBig : ErrorBase { + const BufferedStackTrace *stack; + uptr user_size; + uptr total_size; + uptr max_size; + + ErrorAllocationSizeTooBig() = default; // (*) + ErrorAllocationSizeTooBig(u32 tid, BufferedStackTrace *stack_, + uptr user_size_, uptr total_size_, uptr max_size_) + : ErrorBase(tid, 10, "allocation-size-too-big"), + stack(stack_), + user_size(user_size_), + total_size(total_size_), + max_size(max_size_) {} + void Print(); +}; + +struct ErrorRssLimitExceeded : ErrorBase { + const BufferedStackTrace *stack; + + ErrorRssLimitExceeded() = default; // (*) + ErrorRssLimitExceeded(u32 tid, BufferedStackTrace *stack_) + : ErrorBase(tid, 10, "rss-limit-exceeded"), + stack(stack_) {} + void Print(); +}; + +struct ErrorOutOfMemory : ErrorBase { + const BufferedStackTrace *stack; + uptr requested_size; + + ErrorOutOfMemory() = default; // (*) + ErrorOutOfMemory(u32 tid, BufferedStackTrace *stack_, uptr requested_size_) + : ErrorBase(tid, 10, "out-of-memory"), + stack(stack_), + requested_size(requested_size_) {} void Print(); }; struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase { - // ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace. 
const BufferedStackTrace *stack; uptr length1, length2; AddressDescription addr1_description; AddressDescription addr2_description; const char *function; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorStringFunctionMemoryRangesOverlap() = default; + + ErrorStringFunctionMemoryRangesOverlap() = default; // (*) ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_, uptr addr1, uptr length1_, uptr addr2, uptr length2_, const char *function_) @@ -193,65 +284,51 @@ struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase { }; struct ErrorStringFunctionSizeOverflow : ErrorBase { - // ErrorStringFunctionSizeOverflow doesn't own the stack trace. const BufferedStackTrace *stack; AddressDescription addr_description; uptr size; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorStringFunctionSizeOverflow() = default; + + ErrorStringFunctionSizeOverflow() = default; // (*) ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_, uptr addr, uptr size_) - : ErrorBase(tid), + : ErrorBase(tid, 10, "negative-size-param"), stack(stack_), addr_description(addr, /*shouldLockThreadRegistry=*/false), - size(size_) { - scariness.Clear(); - scariness.Scare(10, "negative-size-param"); - } + size(size_) {} void Print(); }; struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase { - // ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace. const BufferedStackTrace *stack; uptr beg, end, old_mid, new_mid; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorBadParamsToAnnotateContiguousContainer() = default; + + ErrorBadParamsToAnnotateContiguousContainer() = default; // (*) // PS4: Do we want an AddressDescription for beg? 
ErrorBadParamsToAnnotateContiguousContainer(u32 tid, BufferedStackTrace *stack_, uptr beg_, uptr end_, uptr old_mid_, uptr new_mid_) - : ErrorBase(tid), + : ErrorBase(tid, 10, "bad-__sanitizer_annotate_contiguous_container"), stack(stack_), beg(beg_), end(end_), old_mid(old_mid_), - new_mid(new_mid_) { - scariness.Clear(); - scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container"); - } + new_mid(new_mid_) {} void Print(); }; struct ErrorODRViolation : ErrorBase { __asan_global global1, global2; u32 stack_id1, stack_id2; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorODRViolation() = default; + + ErrorODRViolation() = default; // (*) ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_, const __asan_global *g2, u32 stack_id2_) - : ErrorBase(tid), + : ErrorBase(tid, 10, "odr-violation"), global1(*g1), global2(*g2), stack_id1(stack_id1_), - stack_id2(stack_id2_) { - scariness.Clear(); - scariness.Scare(10, "odr-violation"); - } + stack_id2(stack_id2_) {} void Print(); }; @@ -259,20 +336,16 @@ struct ErrorInvalidPointerPair : ErrorBase { uptr pc, bp, sp; AddressDescription addr1_description; AddressDescription addr2_description; - // VS2013 doesn't implement unrestricted unions, so we need a trivial default - // constructor - ErrorInvalidPointerPair() = default; + + ErrorInvalidPointerPair() = default; // (*) ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1, uptr p2) - : ErrorBase(tid), + : ErrorBase(tid, 10, "invalid-pointer-pair"), pc(pc_), bp(bp_), sp(sp_), addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false), - addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) { - scariness.Clear(); - scariness.Scare(10, "invalid-pointer-pair"); - } + addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {} void Print(); }; @@ -283,9 +356,8 @@ struct ErrorGeneric : ErrorBase { const char *bug_descr; bool is_write; u8 shadow_val; - // VS2013 doesn't 
implement unrestricted unions, so we need a trivial default - // constructor - ErrorGeneric() = default; + + ErrorGeneric() = default; // (*) ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_, uptr access_size_); void Print(); @@ -300,6 +372,14 @@ struct ErrorGeneric : ErrorBase { macro(AllocTypeMismatch) \ macro(MallocUsableSizeNotOwned) \ macro(SanitizerGetAllocatedSizeNotOwned) \ + macro(CallocOverflow) \ + macro(PvallocOverflow) \ + macro(InvalidAllocationAlignment) \ + macro(InvalidAlignedAllocAlignment) \ + macro(InvalidPosixMemalignAlignment) \ + macro(AllocationSizeTooBig) \ + macro(RssLimitExceeded) \ + macro(OutOfMemory) \ macro(StringFunctionMemoryRangesOverlap) \ macro(StringFunctionSizeOverflow) \ macro(BadParamsToAnnotateContiguousContainer) \ @@ -334,6 +414,7 @@ struct ErrorDescription { }; ErrorDescription() { internal_memset(this, 0, sizeof(*this)); } + explicit ErrorDescription(LinkerInitialized) {} ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR) bool IsValid() { return kind != kErrorKindInvalid; } diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc index 562168e6d428..5682ab4eb476 100644 --- a/lib/asan/asan_flags.cc +++ b/lib/asan/asan_flags.cc @@ -33,10 +33,7 @@ static const char *MaybeCallAsanDefaultOptions() { static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { #ifdef ASAN_DEFAULT_OPTIONS -// Stringize the macro value. -# define ASAN_STRINGIZE(x) #x -# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options) - return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS); + return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS); #else return ""; #endif @@ -163,6 +160,10 @@ void InitializeFlags() { CHECK_LE(f->max_redzone, 2048); CHECK(IsPowerOfTwo(f->redzone)); CHECK(IsPowerOfTwo(f->max_redzone)); + if (SANITIZER_RTEMS) { + CHECK(!f->unmap_shadow_on_exit); + CHECK(!f->protect_shadow_gap); + } // quarantine_size is deprecated but we still honor it. 
// quarantine_size can not be used together with quarantine_size_mb. diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc index 00071d39f041..4af94c56fca0 100644 --- a/lib/asan/asan_flags.inc +++ b/lib/asan/asan_flags.inc @@ -88,7 +88,8 @@ ASAN_FLAG(bool, check_malloc_usable_size, true, "295.*.") ASAN_FLAG(bool, unmap_shadow_on_exit, false, "If set, explicitly unmaps the (huge) shadow at exit.") -ASAN_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap") +ASAN_FLAG(bool, protect_shadow_gap, !SANITIZER_RTEMS, + "If set, mprotect the shadow gap") ASAN_FLAG(bool, print_stats, false, "Print various statistics after printing an error message or if " "atexit=1.") @@ -136,9 +137,9 @@ ASAN_FLAG( "Android. ") ASAN_FLAG( int, detect_invalid_pointer_pairs, 0, - "If non-zero, try to detect operations like <, <=, >, >= and - on " - "invalid pointer pairs (e.g. when pointers belong to different objects). " - "The bigger the value the harder we try.") + "If >= 2, detect operations like <, <=, >, >= and - on invalid pointer " + "pairs (e.g. when pointers belong to different objects); " + "If == 1, detect invalid operations only when both pointers are non-null.") ASAN_FLAG( bool, detect_container_overflow, true, "If true, honor the container overflow annotations. 
See " diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc index 0db65d010349..898f7f40d31b 100644 --- a/lib/asan/asan_globals.cc +++ b/lib/asan/asan_globals.cc @@ -224,8 +224,9 @@ static void RegisterGlobal(const Global *g) { list_of_all_globals = l; if (g->has_dynamic_init) { if (!dynamic_init_globals) { - dynamic_init_globals = new(allocator_for_globals) - VectorOfGlobals(kDynamicInitGlobalsInitialCapacity); + dynamic_init_globals = + new (allocator_for_globals) VectorOfGlobals; // NOLINT + dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity); } DynInitGlobal dyn_global = { *g, false }; dynamic_init_globals->push_back(dyn_global); @@ -358,9 +359,11 @@ void __asan_register_globals(__asan_global *globals, uptr n) { GET_STACK_TRACE_MALLOC; u32 stack_id = StackDepotPut(stack); BlockingMutexLock lock(&mu_for_globals); - if (!global_registration_site_vector) + if (!global_registration_site_vector) { global_registration_site_vector = - new(allocator_for_globals) GlobalRegistrationSiteVector(128); + new (allocator_for_globals) GlobalRegistrationSiteVector; // NOLINT + global_registration_site_vector->reserve(128); + } GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]}; global_registration_site_vector->push_back(site); if (flags()->report_globals >= 2) { diff --git a/lib/asan/asan_globals_win.cc b/lib/asan/asan_globals_win.cc index 261762b63e2c..29ab5ebf16d4 100644 --- a/lib/asan/asan_globals_win.cc +++ b/lib/asan/asan_globals_win.cc @@ -19,9 +19,9 @@ namespace __asan { #pragma section(".ASAN$GA", read, write) // NOLINT #pragma section(".ASAN$GZ", read, write) // NOLINT extern "C" __declspec(allocate(".ASAN$GA")) -__asan_global __asan_globals_start = {}; + ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {}; extern "C" __declspec(allocate(".ASAN$GZ")) -__asan_global __asan_globals_end = {}; + ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {}; #pragma comment(linker, "/merge:.ASAN=.data") 
static void call_on_globals(void (*hook)(__asan_global *, uptr)) { diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc index cb7dcb3b1ca8..aac2bb8a6bbf 100644 --- a/lib/asan/asan_interceptors.cc +++ b/lib/asan/asan_interceptors.cc @@ -24,15 +24,20 @@ #include "lsan/lsan_common.h" #include "sanitizer_common/sanitizer_libc.h" -// There is no general interception at all on Fuchsia. +// There is no general interception at all on Fuchsia and RTEMS. // Only the functions in asan_interceptors_memintrinsics.cc are // really defined to replace libc functions. -#if !SANITIZER_FUCHSIA +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS #if SANITIZER_POSIX #include "sanitizer_common/sanitizer_posix.h" #endif +#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \ + ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION +#include <unwind.h> +#endif + #if defined(__i386) && SANITIZER_LINUX #define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" #elif defined(__mips__) && SANITIZER_LINUX @@ -178,6 +183,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) (void)(s); \ } while (false) #include "sanitizer_common/sanitizer_common_syscalls.inc" +#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" struct ThreadStartParam { atomic_uintptr_t t; @@ -269,7 +275,15 @@ INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp, uptr stack, ssize; ReadContextStack(ucp, &stack, &ssize); ClearShadowMemoryForContextStack(stack, ssize); +#if __has_attribute(__indirect_return__) && \ + (defined(__x86_64__) || defined(__i386__)) + int (*real_swapcontext)(struct ucontext_t *, struct ucontext_t *) + __attribute__((__indirect_return__)) + = REAL(swapcontext); + int res = real_swapcontext(oucp, ucp); +#else int res = REAL(swapcontext)(oucp, ucp); +#endif // swapcontext technically does not return, but program may swap context to // "oucp" later, that would look as if swapcontext() returned 0. 
// We need to clear shadow for ucp once again, as it may be in arbitrary @@ -318,6 +332,32 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) { } #endif +#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION +INTERCEPTOR(void, __cxa_rethrow_primary_exception, void *a) { + CHECK(REAL(__cxa_rethrow_primary_exception)); + __asan_handle_no_return(); + REAL(__cxa_rethrow_primary_exception)(a); +} +#endif + +#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION +INTERCEPTOR(_Unwind_Reason_Code, _Unwind_RaiseException, + _Unwind_Exception *object) { + CHECK(REAL(_Unwind_RaiseException)); + __asan_handle_no_return(); + return REAL(_Unwind_RaiseException)(object); +} +#endif + +#if ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION +INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException, + _Unwind_Exception *object) { + CHECK(REAL(_Unwind_SjLj_RaiseException)); + __asan_handle_no_return(); + return REAL(_Unwind_SjLj_RaiseException)(object); +} +#endif + #if ASAN_INTERCEPT_INDEX # if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX INTERCEPTOR(char*, index, const char *string, int c) @@ -540,14 +580,6 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, } #endif // ASAN_INTERCEPT___CXA_ATEXIT -#if ASAN_INTERCEPT_FORK -INTERCEPTOR(int, fork, void) { - ENSURE_ASAN_INITED(); - int pid = REAL(fork)(); - return pid; -} -#endif // ASAN_INTERCEPT_FORK - // ---------------------- InitializeAsanInterceptors ---------------- {{{1 namespace __asan { void InitializeAsanInterceptors() { @@ -598,6 +630,17 @@ void InitializeAsanInterceptors() { #if ASAN_INTERCEPT___CXA_THROW ASAN_INTERCEPT_FUNC(__cxa_throw); #endif +#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION + ASAN_INTERCEPT_FUNC(__cxa_rethrow_primary_exception); +#endif + // Indirectly intercept std::rethrow_exception. +#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION + INTERCEPT_FUNCTION(_Unwind_RaiseException); +#endif + // Indirectly intercept std::rethrow_exception. 
+#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION + INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException); +#endif // Intercept threading-related functions #if ASAN_INTERCEPT_PTHREAD_CREATE @@ -614,10 +657,6 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(__cxa_atexit); #endif -#if ASAN_INTERCEPT_FORK - ASAN_INTERCEPT_FUNC(fork); -#endif - InitializePlatformInterceptors(); VReport(1, "AddressSanitizer: libc interceptors initialized\n"); diff --git a/lib/asan/asan_interceptors.h b/lib/asan/asan_interceptors.h index e13bdecfdede..50895b167990 100644 --- a/lib/asan/asan_interceptors.h +++ b/lib/asan/asan_interceptors.h @@ -34,10 +34,10 @@ void InitializePlatformInterceptors(); } // namespace __asan -// There is no general interception at all on Fuchsia. +// There is no general interception at all on Fuchsia and RTEMS. // Only the functions in asan_interceptors_memintrinsics.h are // really defined to replace libc functions. -#if !SANITIZER_FUCHSIA +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS // Use macro to describe if specific function should be // intercepted on a given platform. 
@@ -46,13 +46,11 @@ void InitializePlatformInterceptors(); # define ASAN_INTERCEPT__LONGJMP 1 # define ASAN_INTERCEPT_INDEX 1 # define ASAN_INTERCEPT_PTHREAD_CREATE 1 -# define ASAN_INTERCEPT_FORK 1 #else # define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0 # define ASAN_INTERCEPT__LONGJMP 0 # define ASAN_INTERCEPT_INDEX 0 # define ASAN_INTERCEPT_PTHREAD_CREATE 0 -# define ASAN_INTERCEPT_FORK 0 #endif #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ @@ -80,13 +78,20 @@ void InitializePlatformInterceptors(); # define ASAN_INTERCEPT___LONGJMP_CHK 0 #endif -// Android bug: https://code.google.com/p/android/issues/detail?id=61799 -#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \ - !(SANITIZER_ANDROID && defined(__i386)) && \ - !SANITIZER_SOLARIS +#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \ + !SANITIZER_NETBSD # define ASAN_INTERCEPT___CXA_THROW 1 +# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1 +# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__)) +# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1 +# else +# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 1 +# endif #else # define ASAN_INTERCEPT___CXA_THROW 0 +# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0 +# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 0 +# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 0 #endif #if !SANITIZER_WINDOWS diff --git a/lib/asan/asan_interceptors_memintrinsics.cc b/lib/asan/asan_interceptors_memintrinsics.cc index c89cb011492e..39e32cdad12e 100644 --- a/lib/asan/asan_interceptors_memintrinsics.cc +++ b/lib/asan/asan_interceptors_memintrinsics.cc @@ -31,14 +31,14 @@ void *__asan_memmove(void *to, const void *from, uptr size) { ASAN_MEMMOVE_IMPL(nullptr, to, from, size); } -#if SANITIZER_FUCHSIA +#if SANITIZER_FUCHSIA || SANITIZER_RTEMS -// Fuchsia doesn't use sanitizer_common_interceptors.inc, but the only -// things there it wants are these three. 
Just define them as aliases -// here rather than repeating the contents. +// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but +// the only things there it wants are these three. Just define them +// as aliases here rather than repeating the contents. -decltype(memcpy) memcpy[[gnu::alias("__asan_memcpy")]]; -decltype(memmove) memmove[[gnu::alias("__asan_memmove")]]; -decltype(memset) memset[[gnu::alias("__asan_memset")]]; +extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]]; +extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]]; +extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]]; -#endif // SANITIZER_FUCHSIA +#endif // SANITIZER_FUCHSIA || SANITIZER_RTEMS diff --git a/lib/asan/asan_interceptors_memintrinsics.h b/lib/asan/asan_interceptors_memintrinsics.h index 5a8339a23e97..a071e8f684a0 100644 --- a/lib/asan/asan_interceptors_memintrinsics.h +++ b/lib/asan/asan_interceptors_memintrinsics.h @@ -133,15 +133,22 @@ static inline bool RangesOverlap(const char *offset1, uptr length1, const char *offset2, uptr length2) { return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1)); } -#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \ - const char *offset1 = (const char*)_offset1; \ - const char *offset2 = (const char*)_offset2; \ - if (RangesOverlap(offset1, length1, offset2, length2)) { \ - GET_STACK_TRACE_FATAL_HERE; \ - ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \ - offset2, length2, &stack); \ - } \ -} while (0) +#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) \ + do { \ + const char *offset1 = (const char *)_offset1; \ + const char *offset2 = (const char *)_offset2; \ + if (RangesOverlap(offset1, length1, offset2, length2)) { \ + GET_STACK_TRACE_FATAL_HERE; \ + bool suppressed = IsInterceptorSuppressed(name); \ + if (!suppressed && HaveStackTraceBasedSuppressions()) { \ + suppressed = 
IsStackTraceSuppressed(&stack); \ + } \ + if (!suppressed) { \ + ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \ + offset2, length2, &stack); \ + } \ + } \ + } while (0) } // namespace __asan diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h index 19133e5291a9..654878cd15f0 100644 --- a/lib/asan/asan_internal.h +++ b/lib/asan/asan_internal.h @@ -36,7 +36,7 @@ // If set, values like allocator chunk size, as well as defaults for some flags // will be changed towards less memory overhead. #ifndef ASAN_LOW_MEMORY -# if SANITIZER_IOS || SANITIZER_ANDROID +# if SANITIZER_IOS || SANITIZER_ANDROID || SANITIZER_RTEMS # define ASAN_LOW_MEMORY 1 # else # define ASAN_LOW_MEMORY 0 @@ -78,7 +78,7 @@ void InitializeShadowMemory(); // asan_malloc_linux.cc / asan_malloc_mac.cc void ReplaceSystemMalloc(); -// asan_linux.cc / asan_mac.cc / asan_win.cc +// asan_linux.cc / asan_mac.cc / asan_rtems.cc / asan_win.cc uptr FindDynamicShadowStart(); void *AsanDoesNotSupportStaticLinkage(); void AsanCheckDynamicRTPrereqs(); @@ -147,6 +147,9 @@ const int kAsanArrayCookieMagic = 0xac; const int kAsanIntraObjectRedzone = 0xbb; const int kAsanAllocaLeftMagic = 0xca; const int kAsanAllocaRightMagic = 0xcb; +// Used to populate the shadow gap for systems without memory +// protection there (i.e. Myriad). +const int kAsanShadowGap = 0xcc; static const uptr kCurrentStackFrameMagic = 0x41B58AB3; static const uptr kRetiredStackFrameMagic = 0x45E0360E; diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc index 047e1dbb72fa..625f32d408df 100644 --- a/lib/asan/asan_linux.cc +++ b/lib/asan/asan_linux.cc @@ -32,6 +32,7 @@ #include <sys/types.h> #include <dlfcn.h> #include <fcntl.h> +#include <limits.h> #include <pthread.h> #include <stdio.h> #include <unistd.h> @@ -214,7 +215,7 @@ void AsanCheckIncompatibleRT() { // the functions in dynamic ASan runtime instead of the functions in // system libraries, causing crashes later in ASan initialization. 
MemoryMappingLayout proc_maps(/*cache_enabled*/true); - char filename[128]; + char filename[PATH_MAX]; MemoryMappedSegment segment(filename, sizeof(filename)); while (proc_maps.Next(&segment)) { if (IsDynamicRTName(segment.filename)) { diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc index b7af1a58664c..17a0ec57701d 100644 --- a/lib/asan/asan_mac.cc +++ b/lib/asan/asan_mac.cc @@ -62,16 +62,36 @@ uptr FindDynamicShadowStart() { uptr space_size = kHighShadowEnd + left_padding; uptr largest_gap_found = 0; - uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, - granularity, &largest_gap_found); + uptr max_occupied_addr = 0; + VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + uptr shadow_start = + FindAvailableMemoryRange(space_size, alignment, granularity, + &largest_gap_found, &max_occupied_addr); // If the shadow doesn't fit, restrict the address space to make it fit. if (shadow_start == 0) { + VReport( + 2, + "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n", + largest_gap_found, max_occupied_addr); uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment); + if (new_max_vm < max_occupied_addr) { + Report("Unable to find a memory range for dynamic shadow.\n"); + Report( + "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, " + "new_max_vm = %p\n", + space_size, largest_gap_found, max_occupied_addr, new_max_vm); + CHECK(0 && "cannot place shadow"); + } RestrictMemoryToMaxAddress(new_max_vm); kHighMemEnd = new_max_vm - 1; space_size = kHighShadowEnd + left_padding; - shadow_start = - FindAvailableMemoryRange(space_size, alignment, granularity, nullptr); + VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size); + shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity, + nullptr, nullptr); + if (shadow_start == 0) { + Report("Unable to find a memory range after restricting VM.\n"); + CHECK(0 && "cannot place shadow after restricting vm"); + 
} } CHECK_NE((uptr)0, shadow_start); CHECK(IsAligned(shadow_start, alignment)); diff --git a/lib/asan/asan_malloc_linux.cc b/lib/asan/asan_malloc_linux.cc index 6697ff8764ed..76bdff999d62 100644 --- a/lib/asan/asan_malloc_linux.cc +++ b/lib/asan/asan_malloc_linux.cc @@ -16,19 +16,23 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \ - SANITIZER_NETBSD || SANITIZER_SOLARIS + SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" #include "asan_allocator.h" #include "asan_interceptors.h" #include "asan_internal.h" +#include "asan_malloc_local.h" #include "asan_stack.h" // ---------------------- Replacement functions ---------------- {{{1 using namespace __asan; // NOLINT static uptr allocated_for_dlsym; -static const uptr kDlsymAllocPoolSize = 1024; +static uptr last_dlsym_alloc_size_in_words; +static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024; static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; static INLINE bool IsInDlsymAllocPool(const void *ptr) { @@ -39,21 +43,73 @@ static INLINE bool IsInDlsymAllocPool(const void *ptr) { static void *AllocateFromLocalPool(uptr size_in_bytes) { uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym]; + last_dlsym_alloc_size_in_words = size_in_words; allocated_for_dlsym += size_in_words; CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); return mem; } +static void DeallocateFromLocalPool(const void *ptr) { + // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store + // error messages and instead uses malloc followed by free. To avoid pool + // exhaustion due to long object filenames, handle that special case here. 
+ uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words; + void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset]; + if (prev_mem == ptr) { + REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize); + allocated_for_dlsym = prev_offset; + last_dlsym_alloc_size_in_words = 0; + } +} + +static int PosixMemalignFromLocalPool(void **memptr, uptr alignment, + uptr size_in_bytes) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) + return errno_EINVAL; + + CHECK(alignment >= kWordSize); + + uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym]; + uptr aligned_addr = RoundUpTo(addr, alignment); + uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize); + + uptr *end_mem = (uptr*)(aligned_addr + aligned_size); + uptr allocated = end_mem - alloc_memory_for_dlsym; + if (allocated >= kDlsymAllocPoolSize) + return errno_ENOMEM; + + allocated_for_dlsym = allocated; + *memptr = (void*)aligned_addr; + return 0; +} + +#if SANITIZER_RTEMS +void* MemalignFromLocalPool(uptr alignment, uptr size) { + void *ptr = nullptr; + alignment = Max(alignment, kWordSize); + PosixMemalignFromLocalPool(&ptr, alignment, size); + return ptr; +} + +bool IsFromLocalPool(const void *ptr) { + return IsInDlsymAllocPool(ptr); +} +#endif + static INLINE bool MaybeInDlsym() { // Fuchsia doesn't use dlsym-based interceptors. 
return !SANITIZER_FUCHSIA && asan_init_is_running; } +static INLINE bool UseLocalPool() { + return EarlyMalloc() || MaybeInDlsym(); +} + static void *ReallocFromLocalPool(void *ptr, uptr size) { const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); void *new_ptr; - if (UNLIKELY(MaybeInDlsym())) { + if (UNLIKELY(UseLocalPool())) { new_ptr = AllocateFromLocalPool(size); } else { ENSURE_ASAN_INITED(); @@ -66,8 +122,10 @@ static void *ReallocFromLocalPool(void *ptr, uptr size) { INTERCEPTOR(void, free, void *ptr) { GET_STACK_TRACE_FREE; - if (UNLIKELY(IsInDlsymAllocPool(ptr))) + if (UNLIKELY(IsInDlsymAllocPool(ptr))) { + DeallocateFromLocalPool(ptr); return; + } asan_free(ptr, &stack, FROM_MALLOC); } @@ -81,7 +139,7 @@ INTERCEPTOR(void, cfree, void *ptr) { #endif // SANITIZER_INTERCEPT_CFREE INTERCEPTOR(void*, malloc, uptr size) { - if (UNLIKELY(MaybeInDlsym())) + if (UNLIKELY(UseLocalPool())) // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. return AllocateFromLocalPool(size); ENSURE_ASAN_INITED(); @@ -90,7 +148,7 @@ INTERCEPTOR(void*, malloc, uptr size) { } INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { - if (UNLIKELY(MaybeInDlsym())) + if (UNLIKELY(UseLocalPool())) // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
return AllocateFromLocalPool(nmemb * size); ENSURE_ASAN_INITED(); @@ -101,7 +159,7 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { INTERCEPTOR(void*, realloc, void *ptr, uptr size) { if (UNLIKELY(IsInDlsymAllocPool(ptr))) return ReallocFromLocalPool(ptr, size); - if (UNLIKELY(MaybeInDlsym())) + if (UNLIKELY(UseLocalPool())) return AllocateFromLocalPool(size); ENSURE_ASAN_INITED(); GET_STACK_TRACE_MALLOC; @@ -122,10 +180,12 @@ INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) { } #endif // SANITIZER_INTERCEPT_MEMALIGN +#if SANITIZER_INTERCEPT_ALIGNED_ALLOC INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) { GET_STACK_TRACE_MALLOC; - return asan_memalign(boundary, size, &stack, FROM_MALLOC); + return asan_aligned_alloc(boundary, size, &stack); } +#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { GET_CURRENT_PC_BP_SP; @@ -154,8 +214,9 @@ INTERCEPTOR(int, mallopt, int cmd, int value) { #endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { + if (UNLIKELY(UseLocalPool())) + return PosixMemalignFromLocalPool(memptr, alignment, size); GET_STACK_TRACE_MALLOC; - // Printf("posix_memalign: %zx %zu\n", alignment, size); return asan_posix_memalign(memptr, alignment, size, &stack); } diff --git a/lib/asan/asan_malloc_local.h b/lib/asan/asan_malloc_local.h new file mode 100644 index 000000000000..0e8de207d98d --- /dev/null +++ b/lib/asan/asan_malloc_local.h @@ -0,0 +1,44 @@ +//===-- asan_malloc_local.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +// Provide interfaces to check for and handle local pool memory allocation. +//===----------------------------------------------------------------------===// + +#ifndef ASAN_MALLOC_LOCAL_H +#define ASAN_MALLOC_LOCAL_H + +#include "sanitizer_common/sanitizer_platform.h" +#include "asan_internal.h" + +// On RTEMS, we use the local pool to handle memory allocation when the ASan +// run-time is not up. +static INLINE bool EarlyMalloc() { + return SANITIZER_RTEMS && (!__asan::asan_inited || + __asan::asan_init_is_running); +} + +void* MemalignFromLocalPool(uptr alignment, uptr size); + +#if SANITIZER_RTEMS + +bool IsFromLocalPool(const void *ptr); + +#define ALLOCATE_FROM_LOCAL_POOL UNLIKELY(EarlyMalloc()) +#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr)) + +#else // SANITIZER_RTEMS + +#define ALLOCATE_FROM_LOCAL_POOL 0 +#define IS_FROM_LOCAL_POOL(ptr) 0 + +#endif // SANITIZER_RTEMS + +#endif // ASAN_MALLOC_LOCAL_H diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc index 744728d40df5..733ba2d86e13 100644 --- a/lib/asan/asan_malloc_mac.cc +++ b/lib/asan/asan_malloc_mac.cc @@ -38,6 +38,9 @@ using namespace __asan; #define COMMON_MALLOC_CALLOC(count, size) \ GET_STACK_TRACE_MALLOC; \ void *p = asan_calloc(count, size, &stack); +#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + int res = asan_posix_memalign(memptr, alignment, size, &stack); #define COMMON_MALLOC_VALLOC(size) \ GET_STACK_TRACE_MALLOC; \ void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h index d3f360ffc969..3f8cc004b95f 100644 --- a/lib/asan/asan_mapping.h +++ b/lib/asan/asan_mapping.h @@ -122,6 +122,13 @@ // || `[0x400000000000, 0x47ffffffffff]` || LowShadow || // || `[0x000000000000, 0x3fffffffffff]` || LowMem || // +// Shadow mapping on NerBSD/i386 with SHADOW_OFFSET == 0x40000000: +// || `[0x60000000, 0xfffff000]` || HighMem || +// || 
`[0x4c000000, 0x5fffffff]` || HighShadow || +// || `[0x48000000, 0x4bffffff]` || ShadowGap || +// || `[0x40000000, 0x47ffffff]` || LowShadow || +// || `[0x00000000, 0x3fffffff]` || LowMem || +// // Default Windows/i386 mapping: // (the exact location of HighShadow/HighMem may vary depending // on WoW64, /LARGEADDRESSAWARE, etc). @@ -130,11 +137,17 @@ // || `[0x36000000, 0x39ffffff]` || ShadowGap || // || `[0x30000000, 0x35ffffff]` || LowShadow || // || `[0x00000000, 0x2fffffff]` || LowMem || +// +// Shadow mapping on Myriad2 (for shadow scale 5): +// || `[0x9ff80000, 0x9fffffff]` || ShadowGap || +// || `[0x9f000000, 0x9ff7ffff]` || LowShadow || +// || `[0x80000000, 0x9effffff]` || LowMem || +// || `[0x00000000, 0x7fffffff]` || Ignored || #if defined(ASAN_SHADOW_SCALE) static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE; #else -static const u64 kDefaultShadowScale = 3; +static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 5 : 3; #endif static const u64 kDefaultShadowSentinel = ~(uptr)0; static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 @@ -152,9 +165,19 @@ static const u64 kPPC64_ShadowOffset64 = 1ULL << 44; static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 +static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 +static const u64 kMyriadMemoryOffset32 = 0x80000000ULL; +static const u64 kMyriadMemorySize32 = 0x20000000ULL; +static const u64 kMyriadMemoryEnd32 = + kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1; +static const u64 kMyriadShadowOffset32 = + (kMyriadMemoryOffset32 + kMyriadMemorySize32 - + (kMyriadMemorySize32 >> kDefaultShadowScale)); +static const u64 kMyriadCacheBitMask32 = 0x40000000ULL; + #define SHADOW_SCALE 
kDefaultShadowScale #if SANITIZER_FUCHSIA @@ -166,6 +189,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 # define SHADOW_OFFSET kMIPS32_ShadowOffset32 # elif SANITIZER_FREEBSD # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 +# elif SANITIZER_NETBSD +# define SHADOW_OFFSET kNetBSD_ShadowOffset32 # elif SANITIZER_WINDOWS # define SHADOW_OFFSET kWindowsShadowOffset32 # elif SANITIZER_IOS @@ -174,6 +199,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 # else # define SHADOW_OFFSET kIosShadowOffset32 # endif +# elif SANITIZER_MYRIAD2 +# define SHADOW_OFFSET kMyriadShadowOffset32 # else # define SHADOW_OFFSET kDefaultShadowOffset32 # endif @@ -212,6 +239,39 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 #endif #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) + +#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. + +#if DO_ASAN_MAPPING_PROFILE +# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; +#else +# define PROFILE_ASAN_MAPPING() +#endif + +// If 1, all shadow boundaries are constants. +// Don't set to 1 other than for testing. +#define ASAN_FIXED_MAPPING 0 + +namespace __asan { + +extern uptr AsanMappingProfile[]; + +#if ASAN_FIXED_MAPPING +// Fixed mapping for 64-bit Linux. Mostly used for performance comparison +// with non-fixed mapping. As of r175253 (Feb 2013) the performance +// difference between fixed and non-fixed mapping is below the noise level. +static uptr kHighMemEnd = 0x7fffffffffffULL; +static uptr kMidMemBeg = 0x3000000000ULL; +static uptr kMidMemEnd = 0x4fffffffffULL; +#else +extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. 
+#endif + +} // namespace __asan + +#if SANITIZER_MYRIAD2 +#include "asan_mapping_myriad.h" +#else #define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET)) #define kLowMemBeg 0 @@ -243,36 +303,11 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 #define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0) #define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0) -#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below. - -#if DO_ASAN_MAPPING_PROFILE -# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++; -#else -# define PROFILE_ASAN_MAPPING() -#endif - -// If 1, all shadow boundaries are constants. -// Don't set to 1 other than for testing. -#define ASAN_FIXED_MAPPING 0 - namespace __asan { -extern uptr AsanMappingProfile[]; - -#if ASAN_FIXED_MAPPING -// Fixed mapping for 64-bit Linux. Mostly used for performance comparison -// with non-fixed mapping. As of r175253 (Feb 2013) the performance -// difference between fixed and non-fixed mapping is below the noise level. -static uptr kHighMemEnd = 0x7fffffffffffULL; -static uptr kMidMemBeg = 0x3000000000ULL; -static uptr kMidMemEnd = 0x4fffffffffULL; -#else -extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init. 
-#endif - static inline bool AddrIsInLowMem(uptr a) { PROFILE_ASAN_MAPPING(); - return a < kLowMemEnd; + return a <= kLowMemEnd; } static inline bool AddrIsInLowShadow(uptr a) { @@ -280,14 +315,24 @@ static inline bool AddrIsInLowShadow(uptr a) { return a >= kLowShadowBeg && a <= kLowShadowEnd; } +static inline bool AddrIsInMidMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; +} + +static inline bool AddrIsInMidShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return kMidMemBeg && a >= kMidShadowBeg && a <= kMidShadowEnd; +} + static inline bool AddrIsInHighMem(uptr a) { PROFILE_ASAN_MAPPING(); - return a >= kHighMemBeg && a <= kHighMemEnd; + return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd; } -static inline bool AddrIsInMidMem(uptr a) { +static inline bool AddrIsInHighShadow(uptr a) { PROFILE_ASAN_MAPPING(); - return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd; + return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd; } static inline bool AddrIsInShadowGap(uptr a) { @@ -305,6 +350,12 @@ static inline bool AddrIsInShadowGap(uptr a) { return a >= kShadowGapBeg && a <= kShadowGapEnd; } +} // namespace __asan + +#endif // SANITIZER_MYRIAD2 + +namespace __asan { + static inline bool AddrIsInMem(uptr a) { PROFILE_ASAN_MAPPING(); return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) || @@ -317,16 +368,6 @@ static inline uptr MemToShadow(uptr p) { return MEM_TO_SHADOW(p); } -static inline bool AddrIsInHighShadow(uptr a) { - PROFILE_ASAN_MAPPING(); - return a >= kHighShadowBeg && a <= kHighMemEnd; -} - -static inline bool AddrIsInMidShadow(uptr a) { - PROFILE_ASAN_MAPPING(); - return kMidMemBeg && a >= kMidShadowBeg && a <= kMidMemEnd; -} - static inline bool AddrIsInShadow(uptr a) { PROFILE_ASAN_MAPPING(); return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a); @@ -339,6 +380,8 @@ static inline bool AddrIsAlignedByGranularity(uptr a) { static inline bool 
AddressIsPoisoned(uptr a) { PROFILE_ASAN_MAPPING(); + if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a)) + return false; const uptr kAccessSize = 1; u8 *shadow_address = (u8*)MEM_TO_SHADOW(a); s8 shadow_value = *shadow_address; diff --git a/lib/asan/asan_mapping_myriad.h b/lib/asan/asan_mapping_myriad.h new file mode 100644 index 000000000000..baa3247bd190 --- /dev/null +++ b/lib/asan/asan_mapping_myriad.h @@ -0,0 +1,86 @@ +//===-- asan_mapping_myriad.h -----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Myriad-specific definitions for ASan memory mapping. +//===----------------------------------------------------------------------===// +#ifndef ASAN_MAPPING_MYRIAD_H +#define ASAN_MAPPING_MYRIAD_H + +#define RAW_ADDR(mem) ((mem) & ~kMyriadCacheBitMask32) +#define MEM_TO_SHADOW(mem) \ + (((RAW_ADDR(mem) - kLowMemBeg) >> SHADOW_SCALE) + (SHADOW_OFFSET)) + +#define kLowMemBeg kMyriadMemoryOffset32 +#define kLowMemEnd (SHADOW_OFFSET - 1) + +#define kLowShadowBeg SHADOW_OFFSET +#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd) + +#define kHighMemBeg 0 + +#define kHighShadowBeg 0 +#define kHighShadowEnd 0 + +#define kMidShadowBeg 0 +#define kMidShadowEnd 0 + +#define kShadowGapBeg (kLowShadowEnd + 1) +#define kShadowGapEnd kMyriadMemoryEnd32 + +#define kShadowGap2Beg 0 +#define kShadowGap2End 0 + +#define kShadowGap3Beg 0 +#define kShadowGap3End 0 + +namespace __asan { + +static inline bool AddrIsInLowMem(uptr a) { + PROFILE_ASAN_MAPPING(); + a = RAW_ADDR(a); + return a >= kLowMemBeg && a <= kLowMemEnd; +} + +static inline bool AddrIsInLowShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + a = RAW_ADDR(a); + return a >= kLowShadowBeg && a <= 
kLowShadowEnd; +} + +static inline bool AddrIsInMidMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInMidShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInHighMem(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInHighShadow(uptr a) { + PROFILE_ASAN_MAPPING(); + return false; +} + +static inline bool AddrIsInShadowGap(uptr a) { + PROFILE_ASAN_MAPPING(); + a = RAW_ADDR(a); + return a >= kShadowGapBeg && a <= kShadowGapEnd; +} + +} // namespace __asan + +#endif // ASAN_MAPPING_MYRIAD_H diff --git a/lib/asan/asan_memory_profile.cc b/lib/asan/asan_memory_profile.cc index 603284c8c688..8c86d9fe49f2 100644 --- a/lib/asan/asan_memory_profile.cc +++ b/lib/asan/asan_memory_profile.cc @@ -31,9 +31,9 @@ struct AllocationSite { class HeapProfile { public: - HeapProfile() : allocations_(1024) {} + HeapProfile() { allocations_.reserve(1024); } - void ProcessChunk(const AsanChunkView& cv) { + void ProcessChunk(const AsanChunkView &cv) { if (cv.IsAllocated()) { total_allocated_user_size_ += cv.UsedSize(); total_allocated_count_++; @@ -49,10 +49,10 @@ class HeapProfile { } void Print(uptr top_percent, uptr max_number_of_contexts) { - InternalSort(&allocations_, allocations_.size(), - [](const AllocationSite &a, const AllocationSite &b) { - return a.total_size > b.total_size; - }); + Sort(allocations_.data(), allocations_.size(), + [](const AllocationSite &a, const AllocationSite &b) { + return a.total_size > b.total_size; + }); CHECK(total_allocated_user_size_); uptr total_shown = 0; Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: " diff --git a/lib/asan/asan_new_delete.cc b/lib/asan/asan_new_delete.cc index 072f027addd6..30efd61a9680 100644 --- a/lib/asan/asan_new_delete.cc +++ b/lib/asan/asan_new_delete.cc @@ -14,6 +14,8 @@ #include "asan_allocator.h" #include "asan_internal.h" +#include "asan_malloc_local.h" +#include "asan_report.h" #include 
"asan_stack.h" #include "interception/interception.h" @@ -67,16 +69,28 @@ struct nothrow_t {}; enum class align_val_t: size_t {}; } // namespace std -// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. +// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM. +// For local pool allocation, align to SHADOW_GRANULARITY to match asan +// allocator behavior. #define OPERATOR_NEW_BODY(type, nothrow) \ + if (ALLOCATE_FROM_LOCAL_POOL) {\ + void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size);\ + if (!nothrow) CHECK(res);\ + return res;\ + }\ GET_STACK_TRACE_MALLOC;\ void *res = asan_memalign(0, size, &stack, type);\ - if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ return res; #define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \ + if (ALLOCATE_FROM_LOCAL_POOL) {\ + void *res = MemalignFromLocalPool((uptr)align, size);\ + if (!nothrow) CHECK(res);\ + return res;\ + }\ GET_STACK_TRACE_MALLOC;\ void *res = asan_memalign((uptr)align, size, &stack, type);\ - if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ return res; // On OS X it's not enough to just provide our own 'operator new' and @@ -128,18 +142,22 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { #endif // !SANITIZER_MAC #define OPERATOR_DELETE_BODY(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ GET_STACK_TRACE_FREE;\ asan_delete(ptr, 0, 0, &stack, type); #define OPERATOR_DELETE_BODY_SIZE(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ GET_STACK_TRACE_FREE;\ asan_delete(ptr, size, 0, &stack, type); #define OPERATOR_DELETE_BODY_ALIGN(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ GET_STACK_TRACE_FREE;\ asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type); #define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \ + if (IS_FROM_LOCAL_POOL(ptr)) return;\ GET_STACK_TRACE_FREE;\ asan_delete(ptr, size, 
static_cast<uptr>(align), &stack, type); diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc index c3a82aa0049e..1e9c37a13a16 100644 --- a/lib/asan/asan_poisoning.cc +++ b/lib/asan/asan_poisoning.cc @@ -32,7 +32,7 @@ bool CanPoisonMemory() { } void PoisonShadow(uptr addr, uptr size, u8 value) { - if (!CanPoisonMemory()) return; + if (value && !CanPoisonMemory()) return; CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsInMem(addr)); CHECK(AddrIsAlignedByGranularity(addr + size)); @@ -182,8 +182,15 @@ int __asan_address_is_poisoned(void const volatile *addr) { uptr __asan_region_is_poisoned(uptr beg, uptr size) { if (!size) return 0; uptr end = beg + size; - if (!AddrIsInMem(beg)) return beg; - if (!AddrIsInMem(end)) return end; + if (SANITIZER_MYRIAD2) { + // On Myriad, address not in DRAM range need to be treated as + // unpoisoned. + if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0; + if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0; + } else { + if (!AddrIsInMem(beg)) return beg; + if (!AddrIsInMem(end)) return end; + } CHECK_LT(beg, end); uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY); uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY); @@ -452,4 +459,3 @@ bool WordIsPoisoned(uptr addr) { return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0); } } - diff --git a/lib/asan/asan_poisoning.h b/lib/asan/asan_poisoning.h index 1e00070bcf63..c94794cead89 100644 --- a/lib/asan/asan_poisoning.h +++ b/lib/asan/asan_poisoning.h @@ -38,7 +38,7 @@ void PoisonShadowPartialRightRedzone(uptr addr, // performance-critical code with care. ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, u8 value) { - DCHECK(CanPoisonMemory()); + DCHECK(!value || CanPoisonMemory()); uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); uptr shadow_end = MEM_TO_SHADOW( aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; @@ -51,6 +51,9 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, // changed at all. 
It doesn't currently have an efficient means // to zero a bunch of pages, but maybe we should add one. SANITIZER_FUCHSIA == 1 || + // RTEMS doesn't have have pages, let alone a fast way to zero + // them, so default to memset. + SANITIZER_RTEMS == 1 || shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) { REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg); } else { diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc index e3bc02994efd..439712350e46 100644 --- a/lib/asan/asan_report.cc +++ b/lib/asan/asan_report.cc @@ -84,7 +84,7 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr, bool ParseFrameDescription(const char *frame_descr, InternalMmapVector<StackVarDescr> *vars) { CHECK(frame_descr); - char *p; + const char *p; // This string is created by the compiler and has the following form: // "n alloc_1 alloc_2 ... alloc_n" // where alloc_i looks like "offset size len ObjectName" @@ -134,6 +134,10 @@ class ScopedInErrorReport { } ~ScopedInErrorReport() { + if (halt_on_error_ && !__sanitizer_acquire_crash_state()) { + asanThreadRegistry().Unlock(); + return; + } ASAN_ON_ERROR(); if (current_error_.IsValid()) current_error_.Print(); @@ -152,7 +156,7 @@ class ScopedInErrorReport { // Copy the message buffer so that we could start logging without holding a // lock that gets aquired during printing. 
- InternalScopedBuffer<char> buffer_copy(kErrorMessageBufferSize); + InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize); { BlockingMutexLock l(&error_message_buf_mutex); internal_memcpy(buffer_copy.data(), @@ -202,7 +206,7 @@ class ScopedInErrorReport { bool halt_on_error_; }; -ErrorDescription ScopedInErrorReport::current_error_; +ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED); void ReportDeadlySignal(const SignalContext &sig) { ScopedInErrorReport in_report(/*fatal*/ true); @@ -254,6 +258,62 @@ void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, in_report.ReportError(error); } +void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size); + in_report.ReportError(error); +} + +void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size); + in_report.ReportError(error); +} + +void ReportInvalidAllocationAlignment(uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack, + alignment); + in_report.ReportError(error); +} + +void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack, + size, alignment); + in_report.ReportError(error); +} + +void ReportInvalidPosixMemalignAlignment(uptr alignment, + BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack, + alignment); + in_report.ReportError(error); +} + +void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size, + BufferedStackTrace 
*stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size, + total_size, max_size); + in_report.ReportError(error); +} + +void ReportRssLimitExceeded(BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack); + in_report.ReportError(error); +} + +void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) { + ScopedInErrorReport in_report(/*fatal*/ true); + ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size); + in_report.ReportError(error); +} + void ReportStringFunctionMemoryRangesOverlap(const char *function, const char *offset1, uptr length1, const char *offset2, uptr length2, @@ -343,7 +403,11 @@ static bool IsInvalidPointerPair(uptr a1, uptr a2) { } static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { - if (!flags()->detect_invalid_pointer_pairs) return; + switch (flags()->detect_invalid_pointer_pairs) { + case 0 : return; + case 1 : if (p1 == nullptr || p2 == nullptr) return; break; + } + uptr a1 = reinterpret_cast<uptr>(p1); uptr a2 = reinterpret_cast<uptr>(p2); diff --git a/lib/asan/asan_report.h b/lib/asan/asan_report.h index f2cdad47ee81..f7153d4810d0 100644 --- a/lib/asan/asan_report.h +++ b/lib/asan/asan_report.h @@ -58,6 +58,18 @@ void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack); void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, BufferedStackTrace *stack); +void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack); +void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack); +void ReportInvalidAllocationAlignment(uptr alignment, + BufferedStackTrace *stack); +void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment, + BufferedStackTrace *stack); +void ReportInvalidPosixMemalignAlignment(uptr alignment, + 
BufferedStackTrace *stack); +void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size, + BufferedStackTrace *stack); +void ReportRssLimitExceeded(BufferedStackTrace *stack); +void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack); void ReportStringFunctionMemoryRangesOverlap(const char *function, const char *offset1, uptr length1, const char *offset2, uptr length2, diff --git a/lib/asan/asan_rtems.cc b/lib/asan/asan_rtems.cc new file mode 100644 index 000000000000..a4af940057eb --- /dev/null +++ b/lib/asan/asan_rtems.cc @@ -0,0 +1,253 @@ +//===-- asan_rtems.cc -----------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// RTEMS-specific details. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_rtems.h" +#if SANITIZER_RTEMS + +#include "asan_internal.h" +#include "asan_interceptors.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_libc.h" + +#include <pthread.h> +#include <stdlib.h> + +namespace __asan { + +static void ResetShadowMemory() { + uptr shadow_start = SHADOW_OFFSET; + uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32); + uptr gap_start = MEM_TO_SHADOW(shadow_start); + uptr gap_end = MEM_TO_SHADOW(shadow_end); + + REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start); + REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start); +} + +void InitializeShadowMemory() { + kHighMemEnd = 0; + kMidMemBeg = 0; + kMidMemEnd = 0; + + ResetShadowMemory(); +} + +void AsanApplyToGlobals(globals_op_fptr op, const void *needle) { + UNIMPLEMENTED(); +} + +void AsanCheckDynamicRTPrereqs() {} +void AsanCheckIncompatibleRT() {} +void InitializeAsanInterceptors() {} +void InitializePlatformInterceptors() {} +void InitializePlatformExceptionHandlers() {} + +// RTEMS only support static linking; it sufficies to return with no +// error. +void *AsanDoesNotSupportStaticLinkage() { return nullptr; } + +void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { + UNIMPLEMENTED(); +} + +void EarlyInit() { + // Provide early initialization of shadow memory so that + // instrumented code running before full initialzation will not + // report spurious errors. + ResetShadowMemory(); +} + +// We can use a plain thread_local variable for TSD. +static thread_local void *per_thread; + +void *AsanTSDGet() { return per_thread; } + +void AsanTSDSet(void *tsd) { per_thread = tsd; } + +// There's no initialization needed, and the passed-in destructor +// will never be called. 
Instead, our own thread destruction hook +// (below) will call AsanThread::TSDDtor directly. +void AsanTSDInit(void (*destructor)(void *tsd)) { + DCHECK(destructor == &PlatformTSDDtor); +} + +void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); } + +// +// Thread registration. We provide an API similar to the Fushia port. +// + +struct AsanThread::InitOptions { + uptr stack_bottom, stack_size, tls_bottom, tls_size; +}; + +// Shared setup between thread creation and startup for the initial thread. +static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid, + uptr user_id, bool detached, + uptr stack_bottom, uptr stack_size, + uptr tls_bottom, uptr tls_size) { + // In lieu of AsanThread::Create. + AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__); + AsanThreadContext::CreateThreadContextArgs args = {thread, stack}; + asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args); + + // On other systems, AsanThread::Init() is called from the new + // thread itself. But on RTEMS we already know the stack address + // range beforehand, so we can do most of the setup right now. + const AsanThread::InitOptions options = {stack_bottom, stack_size, + tls_bottom, tls_size}; + thread->Init(&options); + return thread; +} + +// This gets the same arguments passed to Init by CreateAsanThread, above. +// We're in the creator thread before the new thread is actually started, but +// its stack and tls address range are already known. +void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) { + DCHECK_NE(GetCurrentThread(), this); + DCHECK_NE(GetCurrentThread(), nullptr); + CHECK_NE(options->stack_bottom, 0); + CHECK_NE(options->stack_size, 0); + stack_bottom_ = options->stack_bottom; + stack_top_ = options->stack_bottom + options->stack_size; + tls_begin_ = options->tls_bottom; + tls_end_ = options->tls_bottom + options->tls_size; +} + +// Called by __asan::AsanInitInternal (asan_rtl.c). 
Unlike other ports, the +// main thread on RTEMS does not require special treatment; its AsanThread is +// already created by the provided hooks. This function simply looks up and +// returns the created thread. +AsanThread *CreateMainThread() { + return GetThreadContextByTidLocked(0)->thread; +} + +// This is called before each thread creation is attempted. So, in +// its first call, the calling thread is the initial and sole thread. +static void *BeforeThreadCreateHook(uptr user_id, bool detached, + uptr stack_bottom, uptr stack_size, + uptr tls_bottom, uptr tls_size) { + EnsureMainThreadIDIsCorrect(); + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) StopInitOrderChecking(); + + GET_STACK_TRACE_THREAD; + u32 parent_tid = GetCurrentTidOrInvalid(); + + return CreateAsanThread(&stack, parent_tid, user_id, detached, + stack_bottom, stack_size, tls_bottom, tls_size); +} + +// This is called after creating a new thread (in the creating thread), +// with the pointer returned by BeforeThreadCreateHook (above). +static void ThreadCreateHook(void *hook, bool aborted) { + AsanThread *thread = static_cast<AsanThread *>(hook); + if (!aborted) { + // The thread was created successfully. + // ThreadStartHook is already running in the new thread. + } else { + // The thread wasn't created after all. + // Clean up everything we set up in BeforeThreadCreateHook. + asanThreadRegistry().FinishThread(thread->tid()); + UnmapOrDie(thread, sizeof(AsanThread)); + } +} + +// This is called (1) in the newly-created thread before it runs anything else, +// with the pointer returned by BeforeThreadCreateHook (above). (2) before a +// thread restart. 
+static void ThreadStartHook(void *hook, uptr os_id) { + if (!hook) + return; + + AsanThread *thread = static_cast<AsanThread *>(hook); + SetCurrentThread(thread); + + ThreadStatus status = + asanThreadRegistry().GetThreadLocked(thread->tid())->status; + DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning); + // Determine whether we are starting or restarting the thread. + if (status == ThreadStatusCreated) + // In lieu of AsanThread::ThreadStart. + asanThreadRegistry().StartThread(thread->tid(), os_id, + /*workerthread*/ false, nullptr); + else { + // In a thread restart, a thread may resume execution at an + // arbitrary function entry point, with its stack and TLS state + // reset. We unpoison the stack in that case. + PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0); + } +} + +// Each thread runs this just before it exits, +// with the pointer returned by BeforeThreadCreateHook (above). +// All per-thread destructors have already been called. +static void ThreadExitHook(void *hook, uptr os_id) { + AsanThread *thread = static_cast<AsanThread *>(hook); + if (thread) + AsanThread::TSDDtor(thread->context()); +} + +static void HandleExit() { + // Disable ASan by setting it to uninitialized. Also reset the + // shadow memory to avoid reporting errors after the run-time has + // been desroyed. + if (asan_inited) { + asan_inited = false; + ResetShadowMemory(); + } +} + +} // namespace __asan + +// These are declared (in extern "C") by <some_path/sanitizer.h>. +// The system runtime will call our definitions directly. 
+ +extern "C" { +void __sanitizer_early_init() { + __asan::EarlyInit(); +} + +void *__sanitizer_before_thread_create_hook(uptr thread, bool detached, + const char *name, + void *stack_base, size_t stack_size, + void *tls_base, size_t tls_size) { + return __asan::BeforeThreadCreateHook( + thread, detached, + reinterpret_cast<uptr>(stack_base), stack_size, + reinterpret_cast<uptr>(tls_base), tls_size); +} + +void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) { + __asan::ThreadCreateHook(handle, status != 0); +} + +void __sanitizer_thread_start_hook(void *handle, uptr self) { + __asan::ThreadStartHook(handle, self); +} + +void __sanitizer_thread_exit_hook(void *handle, uptr self) { + __asan::ThreadExitHook(handle, self); +} + +void __sanitizer_exit() { + __asan::HandleExit(); +} +} // "C" + +#endif // SANITIZER_RTEMS diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc index 21fd0e240708..4cff736f213a 100644 --- a/lib/asan/asan_rtl.cc +++ b/lib/asan/asan_rtl.cc @@ -56,7 +56,8 @@ static void AsanDie() { UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg); UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd); } else { - UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); + if (kHighShadowEnd) + UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); } } } @@ -65,8 +66,14 @@ static void AsanCheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2) { Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line, cond, (uptr)v1, (uptr)v2); - // FIXME: check for infinite recursion without a thread-local counter here. - PRINT_CURRENT_STACK_CHECK(); + + // Print a stack trace the first time we come here. Otherwise, we probably + // failed a CHECK during symbolization. 
+ static atomic_uint32_t num_calls; + if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) { + PRINT_CURRENT_STACK_CHECK(); + } + Die(); } @@ -140,6 +147,8 @@ ASAN_REPORT_ERROR_N(load, false) ASAN_REPORT_ERROR_N(store, true) #define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \ + if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \ + return; \ uptr sp = MEM_TO_SHADOW(addr); \ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \ : *reinterpret_cast<u16 *>(sp); \ @@ -306,6 +315,7 @@ static void asan_atexit() { } static void InitializeHighMemEnd() { +#if !SANITIZER_MYRIAD2 #if !ASAN_FIXED_MAPPING kHighMemEnd = GetMaxUserVirtualAddress(); // Increase kHighMemEnd to make sure it's properly @@ -313,13 +323,16 @@ static void InitializeHighMemEnd() { kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1; #endif // !ASAN_FIXED_MAPPING CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0); +#endif // !SANITIZER_MYRIAD2 } void PrintAddressSpaceLayout() { - Printf("|| `[%p, %p]` || HighMem ||\n", - (void*)kHighMemBeg, (void*)kHighMemEnd); - Printf("|| `[%p, %p]` || HighShadow ||\n", - (void*)kHighShadowBeg, (void*)kHighShadowEnd); + if (kHighMemBeg) { + Printf("|| `[%p, %p]` || HighMem ||\n", + (void*)kHighMemBeg, (void*)kHighMemEnd); + Printf("|| `[%p, %p]` || HighShadow ||\n", + (void*)kHighShadowBeg, (void*)kHighShadowEnd); + } if (kMidMemBeg) { Printf("|| `[%p, %p]` || ShadowGap3 ||\n", (void*)kShadowGap3Beg, (void*)kShadowGap3End); @@ -338,11 +351,14 @@ void PrintAddressSpaceLayout() { Printf("|| `[%p, %p]` || LowMem ||\n", (void*)kLowMemBeg, (void*)kLowMemEnd); } - Printf("MemToShadow(shadow): %p %p %p %p", + Printf("MemToShadow(shadow): %p %p", (void*)MEM_TO_SHADOW(kLowShadowBeg), - (void*)MEM_TO_SHADOW(kLowShadowEnd), - (void*)MEM_TO_SHADOW(kHighShadowBeg), - (void*)MEM_TO_SHADOW(kHighShadowEnd)); + (void*)MEM_TO_SHADOW(kLowShadowEnd)); + if (kHighMemBeg) { + Printf(" %p %p", + 
(void*)MEM_TO_SHADOW(kHighShadowBeg), + (void*)MEM_TO_SHADOW(kHighShadowEnd)); + } if (kMidMemBeg) { Printf(" %p %p", (void*)MEM_TO_SHADOW(kMidShadowBeg), @@ -374,6 +390,7 @@ static void AsanInitInternal() { asan_init_is_running = true; CacheBinaryName(); + CheckASLR(); // Initialize flags. This must be done early, because most of the // initialization steps look at flags(). @@ -526,6 +543,9 @@ void NOINLINE __asan_handle_no_return() { if (curr_thread) { top = curr_thread->stack_top(); bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); + } else if (SANITIZER_RTEMS) { + // Give up On RTEMS. + return; } else { CHECK(!SANITIZER_FUCHSIA); // If we haven't seen this thread, try asking the OS for stack bounds. diff --git a/lib/asan/asan_shadow_setup.cc b/lib/asan/asan_shadow_setup.cc index b3cf0b5c1ce2..083926e70aa2 100644 --- a/lib/asan/asan_shadow_setup.cc +++ b/lib/asan/asan_shadow_setup.cc @@ -14,8 +14,9 @@ #include "sanitizer_common/sanitizer_platform.h" -// asan_fuchsia.cc has its own InitializeShadowMemory implementation. -#if !SANITIZER_FUCHSIA +// asan_fuchsia.cc and asan_rtems.cc have their own +// InitializeShadowMemory implementation. +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS #include "asan_internal.h" #include "asan_mapping.h" @@ -30,8 +31,7 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); uptr size = end - beg + 1; DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. - void *res = MmapFixedNoReserve(beg, size, name); - if (res != (void *)beg) { + if (!MmapFixedNoReserve(beg, size, name)) { Report( "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. 
" "Perhaps you're using ulimit -v\n", @@ -162,4 +162,4 @@ void InitializeShadowMemory() { } // namespace __asan -#endif // !SANITIZER_FUCHSIA +#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc index ad81512dff08..faf423d305b7 100644 --- a/lib/asan/asan_thread.cc +++ b/lib/asan/asan_thread.cc @@ -221,22 +221,25 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { void AsanThread::Init(const InitOptions *options) { next_stack_top_ = next_stack_bottom_ = 0; atomic_store(&stack_switching_, false, memory_order_release); - fake_stack_ = nullptr; // Will be initialized lazily if needed. CHECK_EQ(this->stack_size(), 0U); SetThreadStackAndTls(options); CHECK_GT(this->stack_size(), 0U); CHECK(AddrIsInMem(stack_bottom_)); CHECK(AddrIsInMem(stack_top_ - 1)); ClearShadowForThreadStackAndTLS(); + fake_stack_ = nullptr; + if (__asan_option_detect_stack_use_after_return) + AsyncSignalSafeLazyInitFakeStack(); int local = 0; VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, &local); } -// Fuchsia doesn't use ThreadStart. -// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls. -#if !SANITIZER_FUCHSIA +// Fuchsia and RTEMS don't use ThreadStart. +// asan_fuchsia.c/asan_rtems.c define CreateMainThread and +// SetThreadStackAndTls. 
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS thread_return_t AsanThread::ThreadStart( tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) { @@ -296,12 +299,17 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) { CHECK(AddrIsInStack((uptr)&local)); } -#endif // !SANITIZER_FUCHSIA +#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS void AsanThread::ClearShadowForThreadStackAndTLS() { PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0); - if (tls_begin_ != tls_end_) - PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0); + if (tls_begin_ != tls_end_) { + uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY); + uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY); + FastPoisonShadowPartialRightRedzone(tls_begin_aligned, + tls_end_ - tls_begin_aligned, + tls_end_aligned - tls_end_, 0); + } } bool AsanThread::GetStackFrameAccessByAddr(uptr addr, @@ -386,6 +394,9 @@ static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base, } AsanThread *GetCurrentThread() { + if (SANITIZER_RTEMS && !asan_inited) + return nullptr; + AsanThreadContext *context = reinterpret_cast<AsanThreadContext *>(AsanTSDGet()); if (!context) { @@ -477,6 +488,11 @@ void UnlockThreadRegistry() { __asan::asanThreadRegistry().Unlock(); } +ThreadRegistry *GetThreadRegistryLocked() { + __asan::asanThreadRegistry().CheckLocked(); + return &__asan::asanThreadRegistry(); +} + void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); } diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc index 68eedd1863bc..67125d38ad4a 100644 --- a/lib/asan/asan_win.cc +++ b/lib/asan/asan_win.cc @@ -222,8 +222,8 @@ uptr FindDynamicShadowStart() { uptr alignment = 8 * granularity; uptr left_padding = granularity; uptr space_size = kHighShadowEnd + left_padding; - uptr shadow_start = - FindAvailableMemoryRange(space_size, alignment, granularity, nullptr); + uptr shadow_start = FindAvailableMemoryRange(space_size, alignment, + granularity, nullptr, 
nullptr); CHECK_NE((uptr)0, shadow_start); CHECK(IsAligned(shadow_start, alignment)); return shadow_start; @@ -265,11 +265,6 @@ ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) { // Determine the address of the page that is being accessed. uptr page = RoundDownTo(addr, page_size); - // Query the existing page. - MEMORY_BASIC_INFORMATION mem_info = {}; - if (::VirtualQuery((LPVOID)page, &mem_info, sizeof(mem_info)) == 0) - return EXCEPTION_CONTINUE_SEARCH; - // Commit the page. uptr result = (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE); diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc index c67116c42ca2..c6a313d24026 100644 --- a/lib/asan/asan_win_dll_thunk.cc +++ b/lib/asan/asan_win_dll_thunk.cc @@ -99,7 +99,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { } #endif -// Window specific functions not included in asan_interface.inc. +// Windows specific functions not included in asan_interface.inc. INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return) INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address) INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter) diff --git a/lib/asan/scripts/asan_device_setup b/lib/asan/scripts/asan_device_setup index 92a109727267..5e679e366514 100755 --- a/lib/asan/scripts/asan_device_setup +++ b/lib/asan/scripts/asan_device_setup @@ -309,7 +309,7 @@ if [[ -n "$ASAN_RT64" ]]; then cp "$ASAN_RT_PATH/$ASAN_RT64" "$TMPDIR/" fi -ASAN_OPTIONS=start_deactivated=1,malloc_context_size=0 +ASAN_OPTIONS=start_deactivated=1 # The name of a symlink to libclang_rt.asan-$ARCH-android.so used in LD_PRELOAD. # The idea is to have the same name in lib and lib64 to keep it from falling @@ -336,6 +336,13 @@ exec $_to \$@ EOF } +# On Android-L not allowing user segv handler breaks some applications. +# Since ~May 2017 this is the default setting; included for compatibility with +# older library versions. 
+if [[ PRE_L -eq 0 ]]; then + ASAN_OPTIONS="$ASAN_OPTIONS,allow_user_segv_handler=1" +fi + if [[ x$extra_options != x ]] ; then ASAN_OPTIONS="$ASAN_OPTIONS,$extra_options" fi diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt index 67a8fafaba3c..1b7060591530 100644 --- a/lib/asan/tests/CMakeLists.txt +++ b/lib/asan/tests/CMakeLists.txt @@ -237,6 +237,9 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID) if(APPLE) darwin_filter_host_archs(ASAN_SUPPORTED_ARCH ASAN_TEST_ARCH) endif() + if(OS_NAME MATCHES "SunOS") + list(REMOVE_ITEM ASAN_TEST_ARCH x86_64) + endif() foreach(arch ${ASAN_TEST_ARCH}) @@ -248,6 +251,8 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID) $<TARGET_OBJECTS:RTInterception.osx> $<TARGET_OBJECTS:RTSanitizerCommon.osx> $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx> + $<TARGET_OBJECTS:RTSanitizerCommonCoverage.osx> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.osx> $<TARGET_OBJECTS:RTLSanCommon.osx> $<TARGET_OBJECTS:RTUbsan.osx>) else() @@ -257,6 +262,8 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID) $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonCoverage.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}> $<TARGET_OBJECTS:RTLSanCommon.${arch}> $<TARGET_OBJECTS:RTUbsan.${arch}> $<TARGET_OBJECTS:RTUbsan_cxx.${arch}>) @@ -280,6 +287,8 @@ if(ANDROID) $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonCoverage.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}> $<TARGET_OBJECTS:RTUbsan.${arch}> $<TARGET_OBJECTS:RTUbsan_cxx.${arch}> ${COMPILER_RT_GTEST_SOURCE} diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc index ed000327f126..11a3506a4853 100644 --- a/lib/asan/tests/asan_test.cc +++ b/lib/asan/tests/asan_test.cc @@ -25,6 +25,11 @@ #endif 
#endif +#if defined(__sun__) && defined(__svr4__) +using std::_setjmp; +using std::_longjmp; +#endif + NOINLINE void *malloc_fff(size_t size) { void *res = malloc/**/(size); break_optimization(0); return res;} NOINLINE void *malloc_eee(size_t size) { diff --git a/lib/builtins/CMakeLists.txt b/lib/builtins/CMakeLists.txt index 0b50b5bb8d23..75ff66405649 100644 --- a/lib/builtins/CMakeLists.txt +++ b/lib/builtins/CMakeLists.txt @@ -173,8 +173,8 @@ set(GENERIC_TF_SOURCES trunctfsf2.c) option(COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN - "Skip the atomic builtin (this may be needed if system headers are unavailable)" - Off) + "Skip the atomic builtin (these should normally be provided by a shared library)" + On) if(NOT FUCHSIA AND NOT COMPILER_RT_BAREMETAL_BUILD) set(GENERIC_SOURCES @@ -406,6 +406,7 @@ if(MINGW) arm/aeabi_ldivmod.S arm/aeabi_uidivmod.S arm/aeabi_uldivmod.S + arm/chkstk.S divmoddi4.c divmodsi4.c divdi3.c @@ -459,6 +460,41 @@ set(armv6m_SOURCES ${thumb1_SOURCES}) set(armv7m_SOURCES ${arm_SOURCES}) set(armv7em_SOURCES ${arm_SOURCES}) +# hexagon arch +set(hexagon_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) +set(hexagon_SOURCES + hexagon/common_entry_exit_abi1.S + hexagon/common_entry_exit_abi2.S + hexagon/common_entry_exit_legacy.S + hexagon/dfaddsub.S + hexagon/dfdiv.S + hexagon/dffma.S + hexagon/dfminmax.S + hexagon/dfmul.S + hexagon/dfsqrt.S + hexagon/divdi3.S + hexagon/divsi3.S + hexagon/fabs_opt.S + hexagon/fastmath2_dlib_asm.S + hexagon/fastmath2_ldlib_asm.S + hexagon/fastmath_dlib_asm.S + hexagon/fma_opt.S + hexagon/fmax_opt.S + hexagon/fmin_opt.S + hexagon/memcpy_forward_vp4cp4n2.S + hexagon/memcpy_likely_aligned.S + hexagon/moddi3.S + hexagon/modsi3.S + hexagon/sfdiv_opt.S + hexagon/sfsqrt_opt.S + hexagon/udivdi3.S + hexagon/udivmoddi4.S + hexagon/udivmodsi4.S + hexagon/udivsi3.S + hexagon/umoddi3.S + hexagon/umodsi3.S) + + set(mips_SOURCES ${GENERIC_SOURCES}) set(mipsel_SOURCES ${mips_SOURCES}) set(mips64_SOURCES ${GENERIC_TF_SOURCES} @@ -480,6 
+516,12 @@ set(powerpc64_SOURCES ${GENERIC_SOURCES}) set(powerpc64le_SOURCES ${powerpc64_SOURCES}) +set(riscv_SOURCES ${GENERIC_SOURCES} ${GENERIC_TF_SOURCES}) +set(riscv32_SOURCES + riscv/mulsi3.S + ${riscv_SOURCES}) +set(riscv64_SOURCES ${riscv_SOURCES}) + set(wasm32_SOURCES ${GENERIC_TF_SOURCES} ${GENERIC_SOURCES}) @@ -542,6 +584,12 @@ else () list(APPEND BUILTIN_CFLAGS -fomit-frame-pointer -DCOMPILER_RT_ARMHF_TARGET) endif() + # For RISCV32, we must force enable int128 for compiling long + # double routines. + if("${arch}" STREQUAL "riscv32") + list(APPEND BUILTIN_CFLAGS -fforce-enable-int128) + endif() + add_compiler_rt_runtime(clang_rt.builtins STATIC ARCHS ${arch} diff --git a/lib/builtins/arm/chkstk.S b/lib/builtins/arm/chkstk.S new file mode 100644 index 000000000000..e3002105897e --- /dev/null +++ b/lib/builtins/arm/chkstk.S @@ -0,0 +1,34 @@ +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. + +#include "../assembly.h" + +// __chkstk routine +// This routine is windows specific. +// http://msdn.microsoft.com/en-us/library/ms648426.aspx + +// This clobbers the register r12, and the condition codes, and uses r5 and r6 +// as temporaries by backing them up and restoring them afterwards. +// Does not modify any memory or the stack pointer. 
+ +// movw r4, #256 // Number of bytes of stack, in units of 4 byte +// bl __chkstk +// sub.w sp, sp, r4 + +#define PAGE_SIZE 4096 + + .p2align 2 +DEFINE_COMPILERRT_FUNCTION(__chkstk) + lsl r4, r4, #2 + mov r12, sp + push {r5, r6} + mov r5, r4 +1: + sub r12, r12, #PAGE_SIZE + subs r5, r5, #PAGE_SIZE + ldr r6, [r12] + bgt 1b + + pop {r5, r6} + bx lr +END_COMPILERRT_FUNCTION(__chkstk) diff --git a/lib/builtins/clear_cache.c b/lib/builtins/clear_cache.c index 4a01cb46d4ac..9dcab344ad1b 100644 --- a/lib/builtins/clear_cache.c +++ b/lib/builtins/clear_cache.c @@ -33,6 +33,11 @@ uintptr_t GetCurrentProcess(void); #include <machine/sysarch.h> #endif +#if defined(__OpenBSD__) && defined(__mips__) + #include <sys/types.h> + #include <machine/sysarch.h> +#endif + #if defined(__linux__) && defined(__mips__) #include <sys/cachectl.h> #include <sys/syscall.h> @@ -96,6 +101,8 @@ void __clear_cache(void *start, void *end) { * Intel processors have a unified instruction and data cache * so there is nothing to do */ +#elif defined(_WIN32) && (defined(__arm__) || defined(__aarch64__)) + FlushInstructionCache(GetCurrentProcess(), start, end - start); #elif defined(__arm__) && !defined(__APPLE__) #if defined(__FreeBSD__) || defined(__NetBSD__) struct arm_sync_icache_args arg; @@ -123,8 +130,6 @@ void __clear_cache(void *start, void *end) { : "r"(syscall_nr), "r"(start_reg), "r"(end_reg), "r"(flags)); assert(start_reg == 0 && "Cache flush syscall failed."); - #elif defined(_WIN32) - FlushInstructionCache(GetCurrentProcess(), start, end - start); #else compilerrt_abort(); #endif @@ -142,6 +147,8 @@ void __clear_cache(void *start, void *end) { #else syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE); #endif +#elif defined(__mips__) && defined(__OpenBSD__) + cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE); #elif defined(__aarch64__) && !defined(__APPLE__) uint64_t xstart = (uint64_t)(uintptr_t) start; uint64_t xend = (uint64_t)(uintptr_t) end; @@ -156,12 +163,14 
@@ void __clear_cache(void *start, void *end) { * uintptr_t in case this runs in an IPL32 environment. */ const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15); - for (addr = xstart; addr < xend; addr += dcache_line_size) + for (addr = xstart & ~(dcache_line_size - 1); addr < xend; + addr += dcache_line_size) __asm __volatile("dc cvau, %0" :: "r"(addr)); __asm __volatile("dsb ish"); const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15); - for (addr = xstart; addr < xend; addr += icache_line_size) + for (addr = xstart & ~(icache_line_size - 1); addr < xend; + addr += icache_line_size) __asm __volatile("ic ivau, %0" :: "r"(addr)); __asm __volatile("isb sy"); #elif defined (__powerpc64__) diff --git a/lib/builtins/clzdi2.c b/lib/builtins/clzdi2.c index b9e64da492bd..b56d98f5c01f 100644 --- a/lib/builtins/clzdi2.c +++ b/lib/builtins/clzdi2.c @@ -16,6 +16,12 @@ /* Returns: the number of leading 0-bits */ +#if !defined(__clang__) && (defined(__sparc64__) || defined(__mips64) || defined(__riscv__)) +/* gcc resolves __builtin_clz -> __clzdi2 leading to infinite recursion */ +#define __builtin_clz(a) __clzsi2(a) +extern si_int __clzsi2(si_int); +#endif + /* Precondition: a != 0 */ COMPILER_RT_ABI si_int diff --git a/lib/builtins/cpu_model.c b/lib/builtins/cpu_model.c index 4c96e9cd85d5..43b913390fc4 100644 --- a/lib/builtins/cpu_model.c +++ b/lib/builtins/cpu_model.c @@ -416,9 +416,9 @@ static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model, *Subtype = AMDFAM15H_BDVER3; break; // "bdver3"; 30h-3Fh: Steamroller } - if (Model >= 0x10 && Model <= 0x1f) { + if ((Model >= 0x10 && Model <= 0x1f) || Model == 0x02) { *Subtype = AMDFAM15H_BDVER2; - break; // "bdver2"; 10h-1Fh: Piledriver + break; // "bdver2"; 02h, 10h-1Fh: Piledriver } if (Model <= 0x0f) { *Subtype = AMDFAM15H_BDVER1; diff --git a/lib/builtins/ctzdi2.c b/lib/builtins/ctzdi2.c index db3c6fdc08f1..eecde29718d7 100644 --- a/lib/builtins/ctzdi2.c +++ b/lib/builtins/ctzdi2.c @@ -16,6 +16,12 
@@ /* Returns: the number of trailing 0-bits */ +#if !defined(__clang__) && (defined(__sparc64__) || defined(__mips64) || defined(__riscv__)) +/* gcc resolves __builtin_ctz -> __ctzdi2 leading to infinite recursion */ +#define __builtin_ctz(a) __ctzsi2(a) +extern si_int __ctzsi2(si_int); +#endif + /* Precondition: a != 0 */ COMPILER_RT_ABI si_int diff --git a/lib/builtins/emutls.c b/lib/builtins/emutls.c index 5dd8dd154771..07d436e267d8 100644 --- a/lib/builtins/emutls.c +++ b/lib/builtins/emutls.c @@ -14,7 +14,22 @@ #include "int_lib.h" #include "int_util.h" +#ifdef __BIONIC__ +/* There are 4 pthread key cleanup rounds on Bionic. Delay emutls deallocation + to round 2. We need to delay deallocation because: + - Android versions older than M lack __cxa_thread_atexit_impl, so apps + use a pthread key destructor to call C++ destructors. + - Apps might use __thread/thread_local variables in pthread destructors. + We can't wait until the final two rounds, because jemalloc needs two rounds + after the final malloc/free call to free its thread-specific data (see + https://reviews.llvm.org/D46978#1107507). 
*/ +#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 1 +#else +#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 0 +#endif + typedef struct emutls_address_array { + uintptr_t skip_destructor_rounds; uintptr_t size; /* number of elements in the 'data' array */ void* data[]; } emutls_address_array; @@ -65,9 +80,30 @@ static __inline void emutls_memalign_free(void *base) { #endif } +static __inline void emutls_setspecific(emutls_address_array *value) { + pthread_setspecific(emutls_pthread_key, (void*) value); +} + +static __inline emutls_address_array* emutls_getspecific() { + return (emutls_address_array*) pthread_getspecific(emutls_pthread_key); +} + static void emutls_key_destructor(void* ptr) { - emutls_shutdown((emutls_address_array*)ptr); - free(ptr); + emutls_address_array *array = (emutls_address_array*)ptr; + if (array->skip_destructor_rounds > 0) { + /* emutls is deallocated using a pthread key destructor. These + * destructors are called in several rounds to accommodate destructor + * functions that (re)initialize key values with pthread_setspecific. + * Delay the emutls deallocation to accommodate other end-of-thread + * cleanup tasks like calling thread_local destructors (e.g. the + * __cxa_thread_atexit fallback in libc++abi). 
+ */ + array->skip_destructor_rounds--; + emutls_setspecific(array); + } else { + emutls_shutdown(array); + free(ptr); + } } static __inline void emutls_init(void) { @@ -88,15 +124,7 @@ static __inline void emutls_unlock() { pthread_mutex_unlock(&emutls_mutex); } -static __inline void emutls_setspecific(emutls_address_array *value) { - pthread_setspecific(emutls_pthread_key, (void*) value); -} - -static __inline emutls_address_array* emutls_getspecific() { - return (emutls_address_array*) pthread_getspecific(emutls_pthread_key); -} - -#else +#else /* _WIN32 */ #include <windows.h> #include <malloc.h> @@ -222,11 +250,11 @@ static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) { InterlockedExchangePointer((void *volatile *)ptr, (void *)val); } -#endif +#endif /* __ATOMIC_RELEASE */ #pragma warning (pop) -#endif +#endif /* _WIN32 */ static size_t emutls_num_object = 0; /* number of allocated TLS objects */ @@ -314,11 +342,12 @@ static __inline void emutls_check_array_set_size(emutls_address_array *array, * which must be no smaller than the given index. */ static __inline uintptr_t emutls_new_data_array_size(uintptr_t index) { - /* Need to allocate emutls_address_array with one extra slot - * to store the data array size. + /* Need to allocate emutls_address_array with extra slots + * to store the header. * Round up the emutls_address_array size to multiple of 16. 
*/ - return ((index + 1 + 15) & ~((uintptr_t)15)) - 1; + uintptr_t header_words = sizeof(emutls_address_array) / sizeof(void *); + return ((index + header_words + 15) & ~((uintptr_t)15)) - header_words; } /* Returns the size in bytes required for an emutls_address_array with @@ -337,8 +366,10 @@ emutls_get_address_array(uintptr_t index) { if (array == NULL) { uintptr_t new_size = emutls_new_data_array_size(index); array = (emutls_address_array*) malloc(emutls_asize(new_size)); - if (array) + if (array) { memset(array->data, 0, new_size * sizeof(void*)); + array->skip_destructor_rounds = EMUTLS_SKIP_DESTRUCTOR_ROUNDS; + } emutls_check_array_set_size(array, new_size); } else if (index > array->size) { uintptr_t orig_size = array->size; diff --git a/lib/builtins/hexagon/common_entry_exit_abi1.S b/lib/builtins/hexagon/common_entry_exit_abi1.S new file mode 100644 index 000000000000..d5479d2a52fa --- /dev/null +++ b/lib/builtins/hexagon/common_entry_exit_abi1.S @@ -0,0 +1,103 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +/* Functions that implement common sequences in function prologues and epilogues + used to save code size */ + + .macro FUNCTION_BEGIN name + .text + .globl \name + .type \name, @function + .falign +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + .macro FALLTHROUGH_TAIL_CALL name0 name1 + .size \name0, . - \name0 + .globl \name1 + .type \name1, @function + .falign +\name1: + .endm + + + + +/* Save r25:24 at fp+#-8 and r27:26 at fp+#-16. */ + + + + +/* The compiler knows that the __save_* functions clobber LR. No other + registers should be used without informing the compiler. 
*/ + +/* Since we can only issue one store per packet, we don't hurt performance by + simply jumping to the right point in this sequence of stores. */ + +FUNCTION_BEGIN __save_r24_through_r27 + memd(fp+#-16) = r27:26 +FALLTHROUGH_TAIL_CALL __save_r24_through_r27 __save_r24_through_r25 + { + memd(fp+#-8) = r25:24 + jumpr lr + } +FUNCTION_END __save_r24_through_r25 + + + + +/* For each of the *_before_tailcall functions, jumpr lr is executed in parallel + with deallocframe. That way, the return gets the old value of lr, which is + where these functions need to return, and at the same time, lr gets the value + it needs going into the tail call. */ + +FUNCTION_BEGIN __restore_r24_through_r27_and_deallocframe_before_tailcall + r27:26 = memd(fp+#-16) +FALLTHROUGH_TAIL_CALL __restore_r24_through_r27_and_deallocframe_before_tailcall __restore_r24_through_r25_and_deallocframe_before_tailcall + { + r25:24 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r24_through_r25_and_deallocframe_before_tailcall + + + + +/* Here we use the extra load bandwidth to restore LR early, allowing the return + to occur in parallel with the deallocframe. */ + +FUNCTION_BEGIN __restore_r24_through_r27_and_deallocframe + { + lr = memw(fp+#4) + r27:26 = memd(fp+#-16) + } + { + r25:24 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r24_through_r27_and_deallocframe + + + + +/* Here the load bandwidth is maximized. 
*/ + +FUNCTION_BEGIN __restore_r24_through_r25_and_deallocframe + { + r25:24 = memd(fp+#-8) + deallocframe + } + jumpr lr +FUNCTION_END __restore_r24_through_r25_and_deallocframe diff --git a/lib/builtins/hexagon/common_entry_exit_abi2.S b/lib/builtins/hexagon/common_entry_exit_abi2.S new file mode 100644 index 000000000000..6f470343db49 --- /dev/null +++ b/lib/builtins/hexagon/common_entry_exit_abi2.S @@ -0,0 +1,268 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +/* Functions that implement common sequences in function prologues and epilogues + used to save code size */ + + .macro FUNCTION_BEGIN name + .p2align 2 + .section .text.\name,"ax",@progbits + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + .macro FALLTHROUGH_TAIL_CALL name0 name1 + .p2align 2 + .size \name0, . - \name0 + .globl \name1 + .type \name1, @function +\name1: + .endm + + + + +/* Save r17:16 at fp+#-8, r19:18 at fp+#-16, r21:20 at fp+#-24, r23:22 at + fp+#-32, r25:24 at fp+#-40, and r27:26 at fp+#-48. + The compiler knows that the __save_* functions clobber LR. No other + registers should be used without informing the compiler. 
*/ + +FUNCTION_BEGIN __save_r16_through_r27 + { + memd(fp+#-48) = r27:26 + memd(fp+#-40) = r25:24 + } + { + memd(fp+#-32) = r23:22 + memd(fp+#-24) = r21:20 + } + { + memd(fp+#-16) = r19:18 + memd(fp+#-8) = r17:16 + jumpr lr + } +FUNCTION_END __save_r16_through_r27 + +FUNCTION_BEGIN __save_r16_through_r25 + { + memd(fp+#-40) = r25:24 + memd(fp+#-32) = r23:22 + } + { + memd(fp+#-24) = r21:20 + memd(fp+#-16) = r19:18 + } + { + memd(fp+#-8) = r17:16 + jumpr lr + } +FUNCTION_END __save_r16_through_r25 + +FUNCTION_BEGIN __save_r16_through_r23 + { + memd(fp+#-32) = r23:22 + memd(fp+#-24) = r21:20 + } + { + memd(fp+#-16) = r19:18 + memd(fp+#-8) = r17:16 + jumpr lr + } +FUNCTION_END __save_r16_through_r23 + +FUNCTION_BEGIN __save_r16_through_r21 + { + memd(fp+#-24) = r21:20 + memd(fp+#-16) = r19:18 + } + { + memd(fp+#-8) = r17:16 + jumpr lr + } +FUNCTION_END __save_r16_through_r21 + +FUNCTION_BEGIN __save_r16_through_r19 + { + memd(fp+#-16) = r19:18 + memd(fp+#-8) = r17:16 + jumpr lr + } +FUNCTION_END __save_r16_through_r19 + +FUNCTION_BEGIN __save_r16_through_r17 + { + memd(fp+#-8) = r17:16 + jumpr lr + } +FUNCTION_END __save_r16_through_r17 + +/* For each of the *_before_tailcall functions, jumpr lr is executed in parallel + with deallocframe. That way, the return gets the old value of lr, which is + where these functions need to return, and at the same time, lr gets the value + it needs going into the tail call. 
*/ + + +FUNCTION_BEGIN __restore_r16_through_r27_and_deallocframe_before_tailcall + r27:26 = memd(fp+#-48) + { + r25:24 = memd(fp+#-40) + r23:22 = memd(fp+#-32) + } + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r16_through_r27_and_deallocframe_before_tailcall + +FUNCTION_BEGIN __restore_r16_through_r25_and_deallocframe_before_tailcall + { + r25:24 = memd(fp+#-40) + r23:22 = memd(fp+#-32) + } + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r16_through_r25_and_deallocframe_before_tailcall + +FUNCTION_BEGIN __restore_r16_through_r23_and_deallocframe_before_tailcall + { + r23:22 = memd(fp+#-32) + r21:20 = memd(fp+#-24) + } + r19:18 = memd(fp+#-16) + { + r17:16 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r16_through_r23_and_deallocframe_before_tailcall + + +FUNCTION_BEGIN __restore_r16_through_r21_and_deallocframe_before_tailcall + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r16_through_r19_and_deallocframe_before_tailcall + +FUNCTION_BEGIN __restore_r16_through_r19_and_deallocframe_before_tailcall + r19:18 = memd(fp+#-16) + { + r17:16 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r16_through_r19_and_deallocframe_before_tailcall + +FUNCTION_BEGIN __restore_r16_through_r17_and_deallocframe_before_tailcall + { + r17:16 = memd(fp+#-8) + deallocframe + jumpr lr + } +FUNCTION_END __restore_r16_through_r17_and_deallocframe_before_tailcall + + +FUNCTION_BEGIN __restore_r16_through_r27_and_deallocframe + r27:26 = memd(fp+#-48) + { + r25:24 = memd(fp+#-40) + r23:22 = memd(fp+#-32) + } + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r27_and_deallocframe + 
+FUNCTION_BEGIN __restore_r16_through_r25_and_deallocframe + { + r25:24 = memd(fp+#-40) + r23:22 = memd(fp+#-32) + } + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r25_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r23_and_deallocframe + { + r23:22 = memd(fp+#-32) + } + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r23_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r21_and_deallocframe + { + r21:20 = memd(fp+#-24) + r19:18 = memd(fp+#-16) + } + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r21_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r19_and_deallocframe + { + r19:18 = memd(fp+#-16) + r17:16 = memd(fp+#-8) + } + { + dealloc_return + } +FUNCTION_END __restore_r16_through_r19_and_deallocframe + +FUNCTION_BEGIN __restore_r16_through_r17_and_deallocframe + { + r17:16 = memd(fp+#-8) + dealloc_return + } +FUNCTION_END __restore_r16_through_r17_and_deallocframe + +FUNCTION_BEGIN __deallocframe + dealloc_return +FUNCTION_END __deallocframe diff --git a/lib/builtins/hexagon/common_entry_exit_legacy.S b/lib/builtins/hexagon/common_entry_exit_legacy.S new file mode 100644 index 000000000000..3258f15a3267 --- /dev/null +++ b/lib/builtins/hexagon/common_entry_exit_legacy.S @@ -0,0 +1,157 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
//
//===----------------------------------------------------------------------===//


/* Functions that implement common sequences in function prologues and epilogues
   used to save code size */

	// Emit a global function symbol, aligned for the fetch unit.
	.macro FUNCTION_BEGIN name
	.text
	.globl \name
	.type  \name, @function
	.falign
\name:
	.endm

	// Close a function and record its size for the symbol table.
	.macro FUNCTION_END name
	.size  \name, . - \name
	.endm

	// End \name0 and immediately open \name1 at the current location, so
	// that \name0 falls through into \name1 without a branch.
	.macro FALLTHROUGH_TAIL_CALL name0 name1
	.size  \name0, . - \name0
	.globl \name1
	.type  \name1, @function
	.falign
\name1:
	.endm




/* Save r27:26 at fp+#-8, r25:24 at fp+#-16, r23:22 at fp+#-24, r21:20 at
   fp+#-32, r19:18 at fp+#-40, and r17:16 at fp+#-48. */




/* The compiler knows that the __save_* functions clobber LR.  No other
   registers should be used without informing the compiler. */

/* Since we can only issue one store per packet, we don't hurt performance by
   simply jumping to the right point in this sequence of stores. */

// Fall-through chain: entering at __save_r27_through_rNN stores pairs
// rNN:NN-1 up through r27:26 (note: legacy layout, slots reversed relative
// to the abi2 variant), then returns via LR.
FUNCTION_BEGIN __save_r27_through_r16
	memd(fp+#-48) = r17:16
FALLTHROUGH_TAIL_CALL __save_r27_through_r16 __save_r27_through_r18
	memd(fp+#-40) = r19:18
FALLTHROUGH_TAIL_CALL __save_r27_through_r18 __save_r27_through_r20
	memd(fp+#-32) = r21:20
FALLTHROUGH_TAIL_CALL __save_r27_through_r20 __save_r27_through_r22
	memd(fp+#-24) = r23:22
FALLTHROUGH_TAIL_CALL __save_r27_through_r22 __save_r27_through_r24
	memd(fp+#-16) = r25:24
	{
		memd(fp+#-8) = r27:26
		jumpr lr
	}
FUNCTION_END __save_r27_through_r24




/* For each of the *_before_sibcall functions, jumpr lr is executed in parallel
   with deallocframe.  That way, the return gets the old value of lr, which is
   where these functions need to return, and at the same time, lr gets the value
   it needs going into the sibcall.
 */

// Partial-restore chain for the sibcall case: enter at the symbol matching
// the lowest pair that was saved; execution falls through (or jumps) until
// r27:26 is reloaded and the frame is popped.
FUNCTION_BEGIN __restore_r27_through_r20_and_deallocframe_before_sibcall
	{
		r21:20 = memd(fp+#-32)
		r23:22 = memd(fp+#-24)
	}
FALLTHROUGH_TAIL_CALL __restore_r27_through_r20_and_deallocframe_before_sibcall __restore_r27_through_r24_and_deallocframe_before_sibcall
	{
		r25:24 = memd(fp+#-16)
		jump __restore_r27_through_r26_and_deallocframe_before_sibcall
	}
FUNCTION_END __restore_r27_through_r24_and_deallocframe_before_sibcall




FUNCTION_BEGIN __restore_r27_through_r16_and_deallocframe_before_sibcall
	r17:16 = memd(fp+#-48)
FALLTHROUGH_TAIL_CALL __restore_r27_through_r16_and_deallocframe_before_sibcall __restore_r27_through_r18_and_deallocframe_before_sibcall
	{
		r19:18 = memd(fp+#-40)
		r21:20 = memd(fp+#-32)
	}
FALLTHROUGH_TAIL_CALL __restore_r27_through_r18_and_deallocframe_before_sibcall __restore_r27_through_r22_and_deallocframe_before_sibcall
	{
		r23:22 = memd(fp+#-24)
		r25:24 = memd(fp+#-16)
	}
FALLTHROUGH_TAIL_CALL __restore_r27_through_r22_and_deallocframe_before_sibcall __restore_r27_through_r26_and_deallocframe_before_sibcall
	{
		// deallocframe and jumpr lr share a packet: the return uses the old
		// LR while LR is simultaneously reloaded for the sibcall (see the
		// comment above).
		r27:26 = memd(fp+#-8)
		deallocframe
		jumpr lr
	}
FUNCTION_END __restore_r27_through_r26_and_deallocframe_before_sibcall




/* Here we use the extra load bandwidth to restore LR early, allowing the return
   to occur in parallel with the deallocframe. */

FUNCTION_BEGIN __restore_r27_through_r16_and_deallocframe
	{
		r17:16 = memd(fp+#-48)
		r19:18 = memd(fp+#-40)
	}
FALLTHROUGH_TAIL_CALL __restore_r27_through_r16_and_deallocframe __restore_r27_through_r20_and_deallocframe
	{
		r21:20 = memd(fp+#-32)
		r23:22 = memd(fp+#-24)
	}
FALLTHROUGH_TAIL_CALL __restore_r27_through_r20_and_deallocframe __restore_r27_through_r24_and_deallocframe
	{
		// LR was saved at fp+4 by allocframe; reload it directly so the
		// final packet can return while popping the frame.
		lr = memw(fp+#4)
		r25:24 = memd(fp+#-16)
	}
	{
		r27:26 = memd(fp+#-8)
		deallocframe
		jumpr lr
	}
FUNCTION_END __restore_r27_through_r24_and_deallocframe




/* Here the load bandwidth is maximized for all three functions.
 */

// Restore chain for the remaining entry points (r18/r22/r26), using paired
// 64-bit loads to keep both load slots busy in every packet.
FUNCTION_BEGIN __restore_r27_through_r18_and_deallocframe
	{
		r19:18 = memd(fp+#-40)
		r21:20 = memd(fp+#-32)
	}
FALLTHROUGH_TAIL_CALL __restore_r27_through_r18_and_deallocframe __restore_r27_through_r22_and_deallocframe
	{
		r23:22 = memd(fp+#-24)
		r25:24 = memd(fp+#-16)
	}
FALLTHROUGH_TAIL_CALL __restore_r27_through_r22_and_deallocframe __restore_r27_through_r26_and_deallocframe
	{
		r27:26 = memd(fp+#-8)
		deallocframe
	}
	// deallocframe restored LR from the frame; return through it.
	jumpr lr
FUNCTION_END __restore_r27_through_r26_and_deallocframe
diff --git a/lib/builtins/hexagon/dfaddsub.S b/lib/builtins/hexagon/dfaddsub.S
new file mode 100644
index 000000000000..4173f86a4f54
--- /dev/null
+++ b/lib/builtins/hexagon/dfaddsub.S
@@ -0,0 +1,398 @@
//===----------------------Hexagon builtin routine ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

/* Double Precision Add/Subtract */
// NOTE(review): the comment above previously read "Double Precision
// Multiply", a copy/paste slip -- this file defines __hexagon_adddf3 and
// __hexagon_subdf3.

// Register aliases for the double-precision add/subtract routine.
// A and B are the operand register pairs; *H/*L name their high/low words.
#define A r1:0
#define AH r1
#define AL r0
#define B r3:2
#define BH r3
#define BL r2

// Exponent fields of A and B, addressable together as the pair EXPB_A.
#define EXPA r4
#define EXPB r5
#define EXPB_A r5:4

// Scratch register pairs used during alignment and rounding.
#define ZTMP r7:6
#define ZTMPH r7
#define ZTMPL r6

#define ATMP r13:12
#define ATMPH r13
#define ATMPL r12

#define BTMP r9:8
#define BTMPH r9
#define BTMPL r8

#define ATMP2 r11:10
#define ATMP2H r11
#define ATMP2L r10

// Exponent-difference / extract controls; EXTRACTAMT pairs them.
#define EXPDIFF r15
#define EXTRACTOFF r14
#define EXTRACTAMT r15:14

#define TMP r28

// IEEE-754 double layout constants (mantissa bits, exponent field, bias as
// used by this routine's internal fixed-point representation).
#define MANTBITS 52
#define HI_MANTBITS 20
#define EXPBITS 11
#define BIAS 1024
#define MANTISSA_TO_INT_BIAS 52
#define SR_BIT_INEXACT 5

// Bit position of the rounding-mode field in the USR status register.
#ifndef SR_ROUND_OFF
#define SR_ROUND_OFF 22
#endif

// Predicate aliases: NORMAL = both operands are normal numbers,
// BIGB = B's exponent exceeds A's.
#define NORMAL p3
#define BIGB p2

// Export aliases: __qdsp_*, __hexagon_fast_* and __hexagon_fast2_* all
// resolve to the same __hexagon_* implementation.
#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG +#define END(TAG) .size TAG,.-TAG + + .text + .global __hexagon_adddf3 + .global __hexagon_subdf3 + .type __hexagon_adddf3, @function + .type __hexagon_subdf3, @function + +Q6_ALIAS(adddf3) +FAST_ALIAS(adddf3) +FAST2_ALIAS(adddf3) +Q6_ALIAS(subdf3) +FAST_ALIAS(subdf3) +FAST2_ALIAS(subdf3) + + .p2align 5 +__hexagon_adddf3: + { + EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS) + EXPB = extractu(BH,#EXPBITS,#HI_MANTBITS) + ATMP = combine(##0x20000000,#0) + } + { + NORMAL = dfclass(A,#2) + NORMAL = dfclass(B,#2) + BTMP = ATMP + BIGB = cmp.gtu(EXPB,EXPA) // Is B substantially greater than A? + } + { + if (!NORMAL) jump .Ladd_abnormal // If abnormal, go to special code + if (BIGB) A = B // if B >> A, swap A and B + if (BIGB) B = A // If B >> A, swap A and B + if (BIGB) EXPB_A = combine(EXPA,EXPB) // swap exponents + } + { + ATMP = insert(A,#MANTBITS,#EXPBITS-2) // Q1.62 + BTMP = insert(B,#MANTBITS,#EXPBITS-2) // Q1.62 + EXPDIFF = sub(EXPA,EXPB) + ZTMP = combine(#62,#1) + } +#undef BIGB +#undef NORMAL +#define B_POS p3 +#define A_POS p2 +#define NO_STICKIES p1 +.Ladd_continue: + { + EXPDIFF = min(EXPDIFF,ZTMPH) // If exponent difference >= ~60, + // will collapse to sticky bit + ATMP2 = neg(ATMP) + A_POS = cmp.gt(AH,#-1) + EXTRACTOFF = #0 + } + { + if (!A_POS) ATMP = ATMP2 + ATMP2 = extractu(BTMP,EXTRACTAMT) + BTMP = ASR(BTMP,EXPDIFF) +#undef EXTRACTAMT +#undef EXPDIFF +#undef EXTRACTOFF +#define ZERO r15:14 + ZERO = #0 + } + { + NO_STICKIES = cmp.eq(ATMP2,ZERO) + if (!NO_STICKIES.new) BTMPL = or(BTMPL,ZTMPL) + EXPB = add(EXPA,#-BIAS-60) + B_POS = cmp.gt(BH,#-1) + } + { + ATMP = add(ATMP,BTMP) // ADD!!! + ATMP2 = sub(ATMP,BTMP) // Negate and ADD --> SUB!!! 
+ ZTMP = combine(#54,##2045) + } + { + p0 = cmp.gtu(EXPA,ZTMPH) // must be pretty high in case of large cancellation + p0 = !cmp.gtu(EXPA,ZTMPL) + if (!p0.new) jump:nt .Ladd_ovf_unf + if (!B_POS) ATMP = ATMP2 // if B neg, pick difference + } + { + A = convert_d2df(ATMP) // Convert to Double Precision, taking care of flags, etc. So nice! + p0 = cmp.eq(ATMPH,#0) + p0 = cmp.eq(ATMPL,#0) + if (p0.new) jump:nt .Ladd_zero // or maybe conversion handles zero case correctly? + } + { + AH += asl(EXPB,#HI_MANTBITS) + jumpr r31 + } + .falign +__hexagon_subdf3: + { + BH = togglebit(BH,#31) + jump __qdsp_adddf3 + } + + + .falign +.Ladd_zero: + // True zero, full cancellation + // +0 unless round towards negative infinity + { + TMP = USR + A = #0 + BH = #1 + } + { + TMP = extractu(TMP,#2,#22) + BH = asl(BH,#31) + } + { + p0 = cmp.eq(TMP,#2) + if (p0.new) AH = xor(AH,BH) + jumpr r31 + } + .falign +.Ladd_ovf_unf: + // Overflow or Denormal is possible + // Good news: Underflow flag is not possible! + /* + * ATMP has 2's complement value + * + * EXPA has A's exponent, EXPB has EXPA-BIAS-60 + * + * Convert, extract exponent, add adjustment. + * If > 2046, overflow + * If <= 0, denormal + * + * Note that we've not done our zero check yet, so do that too + * + */ + { + A = convert_d2df(ATMP) + p0 = cmp.eq(ATMPH,#0) + p0 = cmp.eq(ATMPL,#0) + if (p0.new) jump:nt .Ladd_zero + } + { + TMP = extractu(AH,#EXPBITS,#HI_MANTBITS) + AH += asl(EXPB,#HI_MANTBITS) + } + { + EXPB = add(EXPB,TMP) + B = combine(##0x00100000,#0) + } + { + p0 = cmp.gt(EXPB,##BIAS+BIAS-2) + if (p0.new) jump:nt .Ladd_ovf + } + { + p0 = cmp.gt(EXPB,#0) + if (p0.new) jumpr:t r31 + TMP = sub(#1,EXPB) + } + { + B = insert(A,#MANTBITS,#0) + A = ATMP + } + { + B = lsr(B,TMP) + } + { + A = insert(B,#63,#0) + jumpr r31 + } + .falign +.Ladd_ovf: + // We get either max finite value or infinity. 
Either way, overflow+inexact + { + A = ATMP // 2's complement value + TMP = USR + ATMP = combine(##0x7fefffff,#-1) // positive max finite + } + { + EXPB = extractu(TMP,#2,#SR_ROUND_OFF) // rounding bits + TMP = or(TMP,#0x28) // inexact + overflow + BTMP = combine(##0x7ff00000,#0) // positive infinity + } + { + USR = TMP + EXPB ^= lsr(AH,#31) // Does sign match rounding? + TMP = EXPB // unmodified rounding mode + } + { + p0 = !cmp.eq(TMP,#1) // If not round-to-zero and + p0 = !cmp.eq(EXPB,#2) // Not rounding the other way, + if (p0.new) ATMP = BTMP // we should get infinity + } + { + A = insert(ATMP,#63,#0) // insert inf/maxfinite, leave sign + } + { + p0 = dfcmp.eq(A,A) + jumpr r31 + } + +.Ladd_abnormal: + { + ATMP = extractu(A,#63,#0) // strip off sign + BTMP = extractu(B,#63,#0) // strip off sign + } + { + p3 = cmp.gtu(ATMP,BTMP) + if (!p3.new) A = B // sort values + if (!p3.new) B = A // sort values + } + { + // Any NaN --> NaN, possibly raise invalid if sNaN + p0 = dfclass(A,#0x0f) // A not NaN? 
+ if (!p0.new) jump:nt .Linvalid_nan_add + if (!p3) ATMP = BTMP + if (!p3) BTMP = ATMP + } + { + // Infinity + non-infinity number is infinity + // Infinity + infinity --> inf or nan + p1 = dfclass(A,#0x08) // A is infinity + if (p1.new) jump:nt .Linf_add + } + { + p2 = dfclass(B,#0x01) // B is zero + if (p2.new) jump:nt .LB_zero // so return A or special 0+0 + ATMP = #0 + } + // We are left with adding one or more subnormals + { + p0 = dfclass(A,#4) + if (p0.new) jump:nt .Ladd_two_subnormal + ATMP = combine(##0x20000000,#0) + } + { + EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS) + EXPB = #1 + // BTMP already ABS(B) + BTMP = asl(BTMP,#EXPBITS-2) + } +#undef ZERO +#define EXTRACTOFF r14 +#define EXPDIFF r15 + { + ATMP = insert(A,#MANTBITS,#EXPBITS-2) + EXPDIFF = sub(EXPA,EXPB) + ZTMP = combine(#62,#1) + jump .Ladd_continue + } + +.Ladd_two_subnormal: + { + ATMP = extractu(A,#63,#0) + BTMP = extractu(B,#63,#0) + } + { + ATMP = neg(ATMP) + BTMP = neg(BTMP) + p0 = cmp.gt(AH,#-1) + p1 = cmp.gt(BH,#-1) + } + { + if (p0) ATMP = A + if (p1) BTMP = B + } + { + ATMP = add(ATMP,BTMP) + } + { + BTMP = neg(ATMP) + p0 = cmp.gt(ATMPH,#-1) + B = #0 + } + { + if (!p0) A = BTMP + if (p0) A = ATMP + BH = ##0x80000000 + } + { + if (!p0) AH = or(AH,BH) + p0 = dfcmp.eq(A,B) + if (p0.new) jump:nt .Lzero_plus_zero + } + { + jumpr r31 + } + +.Linvalid_nan_add: + { + TMP = convert_df2sf(A) // will generate invalid if sNaN + p0 = dfclass(B,#0x0f) // if B is not NaN + if (p0.new) B = A // make it whatever A is + } + { + BL = convert_df2sf(B) // will generate invalid if sNaN + A = #-1 + jumpr r31 + } + .falign +.LB_zero: + { + p0 = dfcmp.eq(ATMP,A) // is A also zero? + if (!p0.new) jumpr:t r31 // If not, just return A + } + // 0 + 0 is special + // if equal integral values, they have the same sign, which is fine for all rounding + // modes. 
+ // If unequal in sign, we get +0 for all rounding modes except round down +.Lzero_plus_zero: + { + p0 = cmp.eq(A,B) + if (p0.new) jumpr:t r31 + } + { + TMP = USR + } + { + TMP = extractu(TMP,#2,#SR_ROUND_OFF) + A = #0 + } + { + p0 = cmp.eq(TMP,#2) + if (p0.new) AH = ##0x80000000 + jumpr r31 + } +.Linf_add: + // adding infinities is only OK if they are equal + { + p0 = !cmp.eq(AH,BH) // Do they have different signs + p0 = dfclass(B,#8) // And is B also infinite? + if (!p0.new) jumpr:t r31 // If not, just a normal inf + } + { + BL = ##0x7f800001 // sNAN + } + { + A = convert_sf2df(BL) // trigger invalid, set NaN + jumpr r31 + } +END(__hexagon_adddf3) diff --git a/lib/builtins/hexagon/dfdiv.S b/lib/builtins/hexagon/dfdiv.S new file mode 100644 index 000000000000..0c5dbe272c89 --- /dev/null +++ b/lib/builtins/hexagon/dfdiv.S @@ -0,0 +1,492 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +/* Double Precision Divide */ + +#define A r1:0 +#define AH r1 +#define AL r0 + +#define B r3:2 +#define BH r3 +#define BL r2 + +#define Q r5:4 +#define QH r5 +#define QL r4 + +#define PROD r7:6 +#define PRODHI r7 +#define PRODLO r6 + +#define SFONE r8 +#define SFDEN r9 +#define SFERROR r10 +#define SFRECIP r11 + +#define EXPBA r13:12 +#define EXPB r13 +#define EXPA r12 + +#define REMSUB2 r15:14 + + + +#define SIGN r28 + +#define Q_POSITIVE p3 +#define NORMAL p2 +#define NO_OVF_UNF p1 +#define P_TMP p0 + +#define RECIPEST_SHIFT 3 +#define QADJ 61 + +#define DFCLASS_NORMAL 0x02 +#define DFCLASS_NUMBER 0x0F +#define DFCLASS_INFINITE 0x08 +#define DFCLASS_ZERO 0x01 +#define DFCLASS_NONZERO (DFCLASS_NUMBER ^ DFCLASS_ZERO) +#define DFCLASS_NONINFINITE (DFCLASS_NUMBER ^ DFCLASS_INFINITE) + +#define DF_MANTBITS 52 +#define DF_EXPBITS 11 +#define SF_MANTBITS 23 +#define SF_EXPBITS 8 +#define DF_BIAS 0x3ff + +#define SR_ROUND_OFF 22 + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG +#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG +#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG +#define END(TAG) .size TAG,.-TAG + + .text + .global __hexagon_divdf3 + .type __hexagon_divdf3,@function + Q6_ALIAS(divdf3) + FAST_ALIAS(divdf3) + FAST2_ALIAS(divdf3) + .p2align 5 +__hexagon_divdf3: + { + NORMAL = dfclass(A,#DFCLASS_NORMAL) + NORMAL = dfclass(B,#DFCLASS_NORMAL) + EXPBA = combine(BH,AH) + SIGN = xor(AH,BH) + } +#undef A +#undef AH +#undef AL +#undef B +#undef BH +#undef BL +#define REM r1:0 +#define REMHI r1 +#define REMLO r0 +#define DENOM r3:2 +#define DENOMHI r3 +#define DENOMLO r2 + { + if (!NORMAL) jump .Ldiv_abnormal + PROD = extractu(DENOM,#SF_MANTBITS,#DF_MANTBITS-SF_MANTBITS) + SFONE = ##0x3f800001 + } + { + SFDEN = or(SFONE,PRODLO) + EXPB = 
extractu(EXPB,#DF_EXPBITS,#DF_MANTBITS-32) + EXPA = extractu(EXPA,#DF_EXPBITS,#DF_MANTBITS-32) + Q_POSITIVE = cmp.gt(SIGN,#-1) + } +#undef SIGN +#define ONE r28 +.Ldenorm_continue: + { + SFRECIP,P_TMP = sfrecipa(SFONE,SFDEN) + SFERROR = and(SFONE,#-2) + ONE = #1 + EXPA = sub(EXPA,EXPB) + } +#undef EXPB +#define RECIPEST r13 + { + SFERROR -= sfmpy(SFRECIP,SFDEN):lib + REMHI = insert(ONE,#DF_EXPBITS+1,#DF_MANTBITS-32) + RECIPEST = ##0x00800000 << RECIPEST_SHIFT + } + { + SFRECIP += sfmpy(SFRECIP,SFERROR):lib + DENOMHI = insert(ONE,#DF_EXPBITS+1,#DF_MANTBITS-32) + SFERROR = and(SFONE,#-2) + } + { + SFERROR -= sfmpy(SFRECIP,SFDEN):lib + QH = #-DF_BIAS+1 + QL = #DF_BIAS-1 + } + { + SFRECIP += sfmpy(SFRECIP,SFERROR):lib + NO_OVF_UNF = cmp.gt(EXPA,QH) + NO_OVF_UNF = !cmp.gt(EXPA,QL) + } + { + RECIPEST = insert(SFRECIP,#SF_MANTBITS,#RECIPEST_SHIFT) + Q = #0 + EXPA = add(EXPA,#-QADJ) + } +#undef SFERROR +#undef SFRECIP +#define TMP r10 +#define TMP1 r11 + { + RECIPEST = add(RECIPEST,#((-3) << RECIPEST_SHIFT)) + } + +#define DIV_ITER1B(QSHIFTINSN,QSHIFT,REMSHIFT,EXTRA) \ + { \ + PROD = mpyu(RECIPEST,REMHI); \ + REM = asl(REM,# ## ( REMSHIFT )); \ + }; \ + { \ + PRODLO = # ## 0; \ + REM -= mpyu(PRODHI,DENOMLO); \ + REMSUB2 = mpyu(PRODHI,DENOMHI); \ + }; \ + { \ + Q += QSHIFTINSN(PROD, # ## ( QSHIFT )); \ + REM -= asl(REMSUB2, # ## 32); \ + EXTRA \ + } + + + DIV_ITER1B(ASL,14,15,) + DIV_ITER1B(ASR,1,15,) + DIV_ITER1B(ASR,16,15,) + DIV_ITER1B(ASR,31,15,PROD=# ( 0 );) + +#undef REMSUB2 +#define TMPPAIR r15:14 +#define TMPPAIRHI r15 +#define TMPPAIRLO r14 +#undef RECIPEST +#define EXPB r13 + { + // compare or sub with carry + TMPPAIR = sub(REM,DENOM) + P_TMP = cmp.gtu(DENOM,REM) + // set up amt to add to q + if (!P_TMP.new) PRODLO = #2 + } + { + Q = add(Q,PROD) + if (!P_TMP) REM = TMPPAIR + TMPPAIR = #0 + } + { + P_TMP = cmp.eq(REM,TMPPAIR) + if (!P_TMP.new) QL = or(QL,ONE) + } + { + PROD = neg(Q) + } + { + if (!Q_POSITIVE) Q = PROD + } +#undef REM +#undef REMHI +#undef REMLO 
+#undef DENOM +#undef DENOMLO +#undef DENOMHI +#define A r1:0 +#define AH r1 +#define AL r0 +#define B r3:2 +#define BH r3 +#define BL r2 + { + A = convert_d2df(Q) + if (!NO_OVF_UNF) jump .Ldiv_ovf_unf + } + { + AH += asl(EXPA,#DF_MANTBITS-32) + jumpr r31 + } + +.Ldiv_ovf_unf: + { + AH += asl(EXPA,#DF_MANTBITS-32) + EXPB = extractu(AH,#DF_EXPBITS,#DF_MANTBITS-32) + } + { + PROD = abs(Q) + EXPA = add(EXPA,EXPB) + } + { + P_TMP = cmp.gt(EXPA,##DF_BIAS+DF_BIAS) // overflow + if (P_TMP.new) jump:nt .Ldiv_ovf + } + { + P_TMP = cmp.gt(EXPA,#0) + if (P_TMP.new) jump:nt .Lpossible_unf // round up to normal possible... + } + /* Underflow */ + /* We know what the infinite range exponent should be (EXPA) */ + /* Q is 2's complement, PROD is abs(Q) */ + /* Normalize Q, shift right, add a high bit, convert, change exponent */ + +#define FUDGE1 7 // how much to shift right +#define FUDGE2 4 // how many guard/round to keep at lsbs + + { + EXPB = add(clb(PROD),#-1) // doesn't need to be added in since + EXPA = sub(#FUDGE1,EXPA) // we extract post-converted exponent + TMP = USR + TMP1 = #63 + } + { + EXPB = min(EXPA,TMP1) + TMP1 = or(TMP,#0x030) + PROD = asl(PROD,EXPB) + EXPA = #0 + } + { + TMPPAIR = extractu(PROD,EXPBA) // bits that will get shifted out + PROD = lsr(PROD,EXPB) // shift out bits + B = #1 + } + { + P_TMP = cmp.gtu(B,TMPPAIR) + if (!P_TMP.new) PRODLO = or(BL,PRODLO) + PRODHI = setbit(PRODHI,#DF_MANTBITS-32+FUDGE2) + } + { + Q = neg(PROD) + P_TMP = bitsclr(PRODLO,#(1<<FUDGE2)-1) + if (!P_TMP.new) TMP = TMP1 + } + { + USR = TMP + if (Q_POSITIVE) Q = PROD + TMP = #-DF_BIAS-(DF_MANTBITS+FUDGE2) + } + { + A = convert_d2df(Q) + } + { + AH += asl(TMP,#DF_MANTBITS-32) + jumpr r31 + } + + +.Lpossible_unf: + /* If upper parts of Q were all F's, but abs(A) == 0x00100000_00000000, we rounded up to min_normal */ + /* The answer is correct, but we need to raise Underflow */ + { + B = extractu(A,#63,#0) + TMPPAIR = combine(##0x00100000,#0) // min normal + TMP = #0x7FFF + } + { + 
P_TMP = dfcmp.eq(TMPPAIR,B) // Is everything zero in the rounded value... + P_TMP = bitsset(PRODHI,TMP) // but a bunch of bits set in the unrounded abs(quotient)? + } + +#if (__HEXAGON_ARCH__ == 60) + TMP = USR // If not, just return + if (!P_TMP) jumpr r31 // Else, we want to set Unf+Inexact + // Note that inexact is already set... +#else + { + if (!P_TMP) jumpr r31 // If not, just return + TMP = USR // Else, we want to set Unf+Inexact + } // Note that inexact is already set... +#endif + { + TMP = or(TMP,#0x30) + } + { + USR = TMP + } + { + p0 = dfcmp.eq(A,A) + jumpr r31 + } + +.Ldiv_ovf: + /* + * Raise Overflow, and choose the correct overflow value (saturated normal or infinity) + */ + { + TMP = USR + B = combine(##0x7fefffff,#-1) + AH = mux(Q_POSITIVE,#0,#-1) + } + { + PROD = combine(##0x7ff00000,#0) + QH = extractu(TMP,#2,#SR_ROUND_OFF) + TMP = or(TMP,#0x28) + } + { + USR = TMP + QH ^= lsr(AH,#31) + QL = QH + } + { + p0 = !cmp.eq(QL,#1) // if not round-to-zero + p0 = !cmp.eq(QH,#2) // and not rounding the other way + if (p0.new) B = PROD // go to inf + p0 = dfcmp.eq(B,B) // get exceptions + } + { + A = insert(B,#63,#0) + jumpr r31 + } + +#undef ONE +#define SIGN r28 +#undef NORMAL +#undef NO_OVF_UNF +#define P_INF p1 +#define P_ZERO p2 +.Ldiv_abnormal: + { + P_TMP = dfclass(A,#DFCLASS_NUMBER) + P_TMP = dfclass(B,#DFCLASS_NUMBER) + Q_POSITIVE = cmp.gt(SIGN,#-1) + } + { + P_INF = dfclass(A,#DFCLASS_INFINITE) + P_INF = dfclass(B,#DFCLASS_INFINITE) + } + { + P_ZERO = dfclass(A,#DFCLASS_ZERO) + P_ZERO = dfclass(B,#DFCLASS_ZERO) + } + { + if (!P_TMP) jump .Ldiv_nan + if (P_INF) jump .Ldiv_invalid + } + { + if (P_ZERO) jump .Ldiv_invalid + } + { + P_ZERO = dfclass(A,#DFCLASS_NONZERO) // nonzero + P_ZERO = dfclass(B,#DFCLASS_NONINFINITE) // non-infinite + } + { + P_INF = dfclass(A,#DFCLASS_NONINFINITE) // non-infinite + P_INF = dfclass(B,#DFCLASS_NONZERO) // nonzero + } + { + if (!P_ZERO) jump .Ldiv_zero_result + if (!P_INF) jump .Ldiv_inf_result + } + /* Now we've 
narrowed it down to (de)normal / (de)normal */ + /* Set up A/EXPA B/EXPB and go back */ +#undef P_ZERO +#undef P_INF +#define P_TMP2 p1 + { + P_TMP = dfclass(A,#DFCLASS_NORMAL) + P_TMP2 = dfclass(B,#DFCLASS_NORMAL) + TMP = ##0x00100000 + } + { + EXPBA = combine(BH,AH) + AH = insert(TMP,#DF_EXPBITS+1,#DF_MANTBITS-32) // clear out hidden bit, sign bit + BH = insert(TMP,#DF_EXPBITS+1,#DF_MANTBITS-32) // clear out hidden bit, sign bit + } + { + if (P_TMP) AH = or(AH,TMP) // if normal, add back in hidden bit + if (P_TMP2) BH = or(BH,TMP) // if normal, add back in hidden bit + } + { + QH = add(clb(A),#-DF_EXPBITS) + QL = add(clb(B),#-DF_EXPBITS) + TMP = #1 + } + { + EXPA = extractu(EXPA,#DF_EXPBITS,#DF_MANTBITS-32) + EXPB = extractu(EXPB,#DF_EXPBITS,#DF_MANTBITS-32) + } + { + A = asl(A,QH) + B = asl(B,QL) + if (!P_TMP) EXPA = sub(TMP,QH) + if (!P_TMP2) EXPB = sub(TMP,QL) + } // recreate values needed by resume coke + { + PROD = extractu(B,#SF_MANTBITS,#DF_MANTBITS-SF_MANTBITS) + } + { + SFDEN = or(SFONE,PRODLO) + jump .Ldenorm_continue + } + +.Ldiv_zero_result: + { + AH = xor(AH,BH) + B = #0 + } + { + A = insert(B,#63,#0) + jumpr r31 + } +.Ldiv_inf_result: + { + p2 = dfclass(B,#DFCLASS_ZERO) + p2 = dfclass(A,#DFCLASS_NONINFINITE) + } + { + TMP = USR + if (!p2) jump 1f + AH = xor(AH,BH) + } + { + TMP = or(TMP,#0x04) // DBZ + } + { + USR = TMP + } +1: + { + B = combine(##0x7ff00000,#0) + p0 = dfcmp.uo(B,B) // take possible exception + } + { + A = insert(B,#63,#0) + jumpr r31 + } +.Ldiv_nan: + { + p0 = dfclass(A,#0x10) + p1 = dfclass(B,#0x10) + if (!p0.new) A = B + if (!p1.new) B = A + } + { + QH = convert_df2sf(A) // get possible invalid exceptions + QL = convert_df2sf(B) + } + { + A = #-1 + jumpr r31 + } + +.Ldiv_invalid: + { + TMP = ##0x7f800001 + } + { + A = convert_sf2df(TMP) // get invalid, get DF qNaN + jumpr r31 + } +END(__hexagon_divdf3) diff --git a/lib/builtins/hexagon/dffma.S b/lib/builtins/hexagon/dffma.S new file mode 100644 index 000000000000..97b885a3bf27 
--- /dev/null +++ b/lib/builtins/hexagon/dffma.S @@ -0,0 +1,705 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG +#define END(TAG) .size TAG,.-TAG + +/* Double Precision Multiply */ + + +#define A r1:0 +#define AH r1 +#define AL r0 +#define B r3:2 +#define BH r3 +#define BL r2 +#define C r5:4 +#define CH r5 +#define CL r4 + + + +#define BTMP r15:14 +#define BTMPH r15 +#define BTMPL r14 + +#define ATMP r13:12 +#define ATMPH r13 +#define ATMPL r12 + +#define CTMP r11:10 +#define CTMPH r11 +#define CTMPL r10 + +#define PP_LL r9:8 +#define PP_LL_H r9 +#define PP_LL_L r8 + +#define PP_ODD r7:6 +#define PP_ODD_H r7 +#define PP_ODD_L r6 + + +#define PP_HH r17:16 +#define PP_HH_H r17 +#define PP_HH_L r16 + +#define EXPA r18 +#define EXPB r19 +#define EXPBA r19:18 + +#define TMP r28 + +#define P_TMP p0 +#define PROD_NEG p3 +#define EXACT p2 +#define SWAP p1 + +#define MANTBITS 52 +#define HI_MANTBITS 20 +#define EXPBITS 11 +#define BIAS 1023 +#define STACKSPACE 32 + +#define ADJUST 4 + +#define FUDGE 7 +#define FUDGE2 3 + +#ifndef SR_ROUND_OFF +#define SR_ROUND_OFF 22 +#endif + + /* + * First, classify for normal values, and abort if abnormal + * + * Next, unpack mantissa into 0x1000_0000_0000_0000 + mant<<8 + * + * Since we know that the 2 MSBs of the H registers is zero, we should never carry + * the partial products that involve the H registers + * + * Try to buy X slots, at the expense of latency if needed + * + * We will have PP_HH with the upper bits of the product, PP_LL with the lower + * PP_HH can have a maximum of 0x03FF_FFFF_FFFF_FFFF or thereabouts + * PP_HH can have a minimum of 
0x0100_0000_0000_0000 + * + * 0x0100_0000_0000_0000 has EXP of EXPA+EXPB-BIAS + * + * We need to align CTMP. + * If CTMP >> PP, convert PP to 64 bit with sticky, align CTMP, and follow normal add + * If CTMP << PP align CTMP and add 128 bits. Then compute sticky + * If CTMP ~= PP, align CTMP and add 128 bits. May have massive cancellation. + * + * Convert partial product and CTMP to 2's complement prior to addition + * + * After we add, we need to normalize into upper 64 bits, then compute sticky. + * + * + */ + + .text + .global __hexagon_fmadf4 + .type __hexagon_fmadf4,@function + .global __hexagon_fmadf5 + .type __hexagon_fmadf5,@function + .global fma + .type fma,@function + Q6_ALIAS(fmadf5) + .p2align 5 +__hexagon_fmadf4: +__hexagon_fmadf5: +fma: + { + P_TMP = dfclass(A,#2) + P_TMP = dfclass(B,#2) + ATMP = #0 + BTMP = #0 + } + { + ATMP = insert(A,#MANTBITS,#EXPBITS-3) + BTMP = insert(B,#MANTBITS,#EXPBITS-3) + PP_ODD_H = ##0x10000000 + allocframe(#STACKSPACE) + } + { + PP_LL = mpyu(ATMPL,BTMPL) + if (!P_TMP) jump .Lfma_abnormal_ab + ATMPH = or(ATMPH,PP_ODD_H) + BTMPH = or(BTMPH,PP_ODD_H) + } + { + P_TMP = dfclass(C,#2) + if (!P_TMP.new) jump:nt .Lfma_abnormal_c + CTMP = combine(PP_ODD_H,#0) + PP_ODD = combine(#0,PP_LL_H) + } +.Lfma_abnormal_c_restart: + { + PP_ODD += mpyu(BTMPL,ATMPH) + CTMP = insert(C,#MANTBITS,#EXPBITS-3) + memd(r29+#0) = PP_HH + memd(r29+#8) = EXPBA + } + { + PP_ODD += mpyu(ATMPL,BTMPH) + EXPBA = neg(CTMP) + P_TMP = cmp.gt(CH,#-1) + TMP = xor(AH,BH) + } + { + EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS) + EXPB = extractu(BH,#EXPBITS,#HI_MANTBITS) + PP_HH = combine(#0,PP_ODD_H) + if (!P_TMP) CTMP = EXPBA + } + { + PP_HH += mpyu(ATMPH,BTMPH) + PP_LL = combine(PP_ODD_L,PP_LL_L) +#undef PP_ODD +#undef PP_ODD_H +#undef PP_ODD_L +#undef ATMP +#undef ATMPL +#undef ATMPH +#undef BTMP +#undef BTMPL +#undef BTMPH +#define RIGHTLEFTSHIFT r13:12 +#define RIGHTSHIFT r13 +#define LEFTSHIFT r12 + + EXPA = add(EXPA,EXPB) +#undef EXPB +#undef EXPBA +#define 
EXPC r19 +#define EXPCA r19:18 + EXPC = extractu(CH,#EXPBITS,#HI_MANTBITS) + } + /* PP_HH:PP_LL now has product */ + /* CTMP is negated */ + /* EXPA,B,C are extracted */ + /* + * We need to negate PP + * Since we will be adding with carry later, if we need to negate, + * just invert all bits now, which we can do conditionally and in parallel + */ +#define PP_HH_TMP r15:14 +#define PP_LL_TMP r7:6 + { + EXPA = add(EXPA,#-BIAS+(ADJUST)) + PROD_NEG = !cmp.gt(TMP,#-1) + PP_LL_TMP = #0 + PP_HH_TMP = #0 + } + { + PP_LL_TMP = sub(PP_LL_TMP,PP_LL,PROD_NEG):carry + P_TMP = !cmp.gt(TMP,#-1) + SWAP = cmp.gt(EXPC,EXPA) // If C >> PP + if (SWAP.new) EXPCA = combine(EXPA,EXPC) + } + { + PP_HH_TMP = sub(PP_HH_TMP,PP_HH,PROD_NEG):carry + if (P_TMP) PP_LL = PP_LL_TMP +#undef PP_LL_TMP +#define CTMP2 r7:6 +#define CTMP2H r7 +#define CTMP2L r6 + CTMP2 = #0 + EXPC = sub(EXPA,EXPC) + } + { + if (P_TMP) PP_HH = PP_HH_TMP + P_TMP = cmp.gt(EXPC,#63) + if (SWAP) PP_LL = CTMP2 + if (SWAP) CTMP2 = PP_LL + } +#undef PP_HH_TMP +//#define ONE r15:14 +//#define S_ONE r14 +#define ZERO r15:14 +#define S_ZERO r15 +#undef PROD_NEG +#define P_CARRY p3 + { + if (SWAP) PP_HH = CTMP // Swap C and PP + if (SWAP) CTMP = PP_HH + if (P_TMP) EXPC = add(EXPC,#-64) + TMP = #63 + } + { + // If diff > 63, pre-shift-right by 64... 
+ if (P_TMP) CTMP2 = CTMP + TMP = asr(CTMPH,#31) + RIGHTSHIFT = min(EXPC,TMP) + LEFTSHIFT = #0 + } +#undef C +#undef CH +#undef CL +#define STICKIES r5:4 +#define STICKIESH r5 +#define STICKIESL r4 + { + if (P_TMP) CTMP = combine(TMP,TMP) // sign extension of pre-shift-right-64 + STICKIES = extract(CTMP2,RIGHTLEFTSHIFT) + CTMP2 = lsr(CTMP2,RIGHTSHIFT) + LEFTSHIFT = sub(#64,RIGHTSHIFT) + } + { + ZERO = #0 + TMP = #-2 + CTMP2 |= lsl(CTMP,LEFTSHIFT) + CTMP = asr(CTMP,RIGHTSHIFT) + } + { + P_CARRY = cmp.gtu(STICKIES,ZERO) // If we have sticky bits from C shift + if (P_CARRY.new) CTMP2L = and(CTMP2L,TMP) // make sure adding 1 == OR +#undef ZERO +#define ONE r15:14 +#define S_ONE r14 + ONE = #1 + STICKIES = #0 + } + { + PP_LL = add(CTMP2,PP_LL,P_CARRY):carry // use the carry to add the sticky + } + { + PP_HH = add(CTMP,PP_HH,P_CARRY):carry + TMP = #62 + } + /* + * PP_HH:PP_LL now holds the sum + * We may need to normalize left, up to ??? bits. + * + * I think that if we have massive cancellation, the range we normalize by + * is still limited + */ + { + LEFTSHIFT = add(clb(PP_HH),#-2) + if (!cmp.eq(LEFTSHIFT.new,TMP)) jump:t 1f // all sign bits? + } + /* We had all sign bits, shift left by 62. */ + { + CTMP = extractu(PP_LL,#62,#2) + PP_LL = asl(PP_LL,#62) + EXPA = add(EXPA,#-62) // And adjust exponent of result + } + { + PP_HH = insert(CTMP,#62,#0) // Then shift 63 + } + { + LEFTSHIFT = add(clb(PP_HH),#-2) + } + .falign +1: + { + CTMP = asl(PP_HH,LEFTSHIFT) + STICKIES |= asl(PP_LL,LEFTSHIFT) + RIGHTSHIFT = sub(#64,LEFTSHIFT) + EXPA = sub(EXPA,LEFTSHIFT) + } + { + CTMP |= lsr(PP_LL,RIGHTSHIFT) + EXACT = cmp.gtu(ONE,STICKIES) + TMP = #BIAS+BIAS-2 + } + { + if (!EXACT) CTMPL = or(CTMPL,S_ONE) + // If EXPA is overflow/underflow, jump to ovf_unf + P_TMP = !cmp.gt(EXPA,TMP) + P_TMP = cmp.gt(EXPA,#1) + if (!P_TMP.new) jump:nt .Lfma_ovf_unf + } + { + // XXX: FIXME: should PP_HH for check of zero be CTMP? + P_TMP = cmp.gtu(ONE,CTMP) // is result true zero? 
+ A = convert_d2df(CTMP) + EXPA = add(EXPA,#-BIAS-60) + PP_HH = memd(r29+#0) + } + { + AH += asl(EXPA,#HI_MANTBITS) + EXPCA = memd(r29+#8) + if (!P_TMP) dealloc_return // not zero, return + } +.Ladd_yields_zero: + /* We had full cancellation. Return +/- zero (-0 when round-down) */ + { + TMP = USR + A = #0 + } + { + TMP = extractu(TMP,#2,#SR_ROUND_OFF) + PP_HH = memd(r29+#0) + EXPCA = memd(r29+#8) + } + { + p0 = cmp.eq(TMP,#2) + if (p0.new) AH = ##0x80000000 + dealloc_return + } + +#undef RIGHTLEFTSHIFT +#undef RIGHTSHIFT +#undef LEFTSHIFT +#undef CTMP2 +#undef CTMP2H +#undef CTMP2L + +.Lfma_ovf_unf: + { + p0 = cmp.gtu(ONE,CTMP) + if (p0.new) jump:nt .Ladd_yields_zero + } + { + A = convert_d2df(CTMP) + EXPA = add(EXPA,#-BIAS-60) + TMP = EXPA + } +#define NEW_EXPB r7 +#define NEW_EXPA r6 + { + AH += asl(EXPA,#HI_MANTBITS) + NEW_EXPB = extractu(AH,#EXPBITS,#HI_MANTBITS) + } + { + NEW_EXPA = add(EXPA,NEW_EXPB) + PP_HH = memd(r29+#0) + EXPCA = memd(r29+#8) +#undef PP_HH +#undef PP_HH_H +#undef PP_HH_L +#undef EXPCA +#undef EXPC +#undef EXPA +#undef PP_LL +#undef PP_LL_H +#undef PP_LL_L +#define EXPA r6 +#define EXPB r7 +#define EXPBA r7:6 +#define ATMP r9:8 +#define ATMPH r9 +#define ATMPL r8 +#undef NEW_EXPB +#undef NEW_EXPA + ATMP = abs(CTMP) + } + { + p0 = cmp.gt(EXPA,##BIAS+BIAS) + if (p0.new) jump:nt .Lfma_ovf + } + { + p0 = cmp.gt(EXPA,#0) + if (p0.new) jump:nt .Lpossible_unf + } + { + // TMP has original EXPA. 
+ // ATMP is corresponding value + // Normalize ATMP and shift right to correct location + EXPB = add(clb(ATMP),#-2) // Amount to left shift to normalize + EXPA = sub(#1+5,TMP) // Amount to right shift to denormalize + p3 = cmp.gt(CTMPH,#-1) + } + /* Underflow */ + /* We know that the infinte range exponent should be EXPA */ + /* CTMP is 2's complement, ATMP is abs(CTMP) */ + { + EXPA = add(EXPA,EXPB) // how much to shift back right + ATMP = asl(ATMP,EXPB) // shift left + AH = USR + TMP = #63 + } + { + EXPB = min(EXPA,TMP) + EXPA = #0 + AL = #0x0030 + } + { + B = extractu(ATMP,EXPBA) + ATMP = asr(ATMP,EXPB) + } + { + p0 = cmp.gtu(ONE,B) + if (!p0.new) ATMPL = or(ATMPL,S_ONE) + ATMPH = setbit(ATMPH,#HI_MANTBITS+FUDGE2) + } + { + CTMP = neg(ATMP) + p1 = bitsclr(ATMPL,#(1<<FUDGE2)-1) + if (!p1.new) AH = or(AH,AL) + B = #0 + } + { + if (p3) CTMP = ATMP + USR = AH + TMP = #-BIAS-(MANTBITS+FUDGE2) + } + { + A = convert_d2df(CTMP) + } + { + AH += asl(TMP,#HI_MANTBITS) + dealloc_return + } +.Lpossible_unf: + { + TMP = ##0x7fefffff + ATMP = abs(CTMP) + } + { + p0 = cmp.eq(AL,#0) + p0 = bitsclr(AH,TMP) + if (!p0.new) dealloc_return:t + TMP = #0x7fff + } + { + p0 = bitsset(ATMPH,TMP) + BH = USR + BL = #0x0030 + } + { + if (p0) BH = or(BH,BL) + } + { + USR = BH + } + { + p0 = dfcmp.eq(A,A) + dealloc_return + } +.Lfma_ovf: + { + TMP = USR + CTMP = combine(##0x7fefffff,#-1) + A = CTMP + } + { + ATMP = combine(##0x7ff00000,#0) + BH = extractu(TMP,#2,#SR_ROUND_OFF) + TMP = or(TMP,#0x28) + } + { + USR = TMP + BH ^= lsr(AH,#31) + BL = BH + } + { + p0 = !cmp.eq(BL,#1) + p0 = !cmp.eq(BH,#2) + } + { + p0 = dfcmp.eq(ATMP,ATMP) + if (p0.new) CTMP = ATMP + } + { + A = insert(CTMP,#63,#0) + dealloc_return + } +#undef CTMP +#undef CTMPH +#undef CTMPL +#define BTMP r11:10 +#define BTMPH r11 +#define BTMPL r10 + +#undef STICKIES +#undef STICKIESH +#undef STICKIESL +#define C r5:4 +#define CH r5 +#define CL r4 + +.Lfma_abnormal_ab: + { + ATMP = extractu(A,#63,#0) + BTMP = extractu(B,#63,#0) + 
deallocframe + } + { + p3 = cmp.gtu(ATMP,BTMP) + if (!p3.new) A = B // sort values + if (!p3.new) B = A + } + { + p0 = dfclass(A,#0x0f) // A NaN? + if (!p0.new) jump:nt .Lnan + if (!p3) ATMP = BTMP + if (!p3) BTMP = ATMP + } + { + p1 = dfclass(A,#0x08) // A is infinity + p1 = dfclass(B,#0x0e) // B is nonzero + } + { + p0 = dfclass(A,#0x08) // a is inf + p0 = dfclass(B,#0x01) // b is zero + } + { + if (p1) jump .Lab_inf + p2 = dfclass(B,#0x01) + } + { + if (p0) jump .Linvalid + if (p2) jump .Lab_true_zero + TMP = ##0x7c000000 + } + // We are left with a normal or subnormal times a subnormal, A > B + // If A and B are both very small, we will go to a single sticky bit; replace + // A and B lower 63 bits with 0x0010_0000_0000_0000, which yields equivalent results + // if A and B might multiply to something bigger, decrease A exp and increase B exp + // and start over + { + p0 = bitsclr(AH,TMP) + if (p0.new) jump:nt .Lfma_ab_tiny + } + { + TMP = add(clb(BTMP),#-EXPBITS) + } + { + BTMP = asl(BTMP,TMP) + } + { + B = insert(BTMP,#63,#0) + AH -= asl(TMP,#HI_MANTBITS) + } + jump fma + +.Lfma_ab_tiny: + ATMP = combine(##0x00100000,#0) + { + A = insert(ATMP,#63,#0) + B = insert(ATMP,#63,#0) + } + jump fma + +.Lab_inf: + { + B = lsr(B,#63) + p0 = dfclass(C,#0x10) + } + { + A ^= asl(B,#63) + if (p0) jump .Lnan + } + { + p1 = dfclass(C,#0x08) + if (p1.new) jump:nt .Lfma_inf_plus_inf + } + /* A*B is +/- inf, C is finite. 
Return A */ + { + jumpr r31 + } + .falign +.Lfma_inf_plus_inf: + { // adding infinities of different signs is invalid + p0 = dfcmp.eq(A,C) + if (!p0.new) jump:nt .Linvalid + } + { + jumpr r31 + } + +.Lnan: + { + p0 = dfclass(B,#0x10) + p1 = dfclass(C,#0x10) + if (!p0.new) B = A + if (!p1.new) C = A + } + { // find sNaNs + BH = convert_df2sf(B) + BL = convert_df2sf(C) + } + { + BH = convert_df2sf(A) + A = #-1 + jumpr r31 + } + +.Linvalid: + { + TMP = ##0x7f800001 // sp snan + } + { + A = convert_sf2df(TMP) + jumpr r31 + } + +.Lab_true_zero: + // B is zero, A is finite number + { + p0 = dfclass(C,#0x10) + if (p0.new) jump:nt .Lnan + if (p0.new) A = C + } + { + p0 = dfcmp.eq(B,C) // is C also zero? + AH = lsr(AH,#31) // get sign + } + { + BH ^= asl(AH,#31) // form correctly signed zero in B + if (!p0) A = C // If C is not zero, return C + if (!p0) jumpr r31 + } + /* B has correctly signed zero, C is also zero */ +.Lzero_plus_zero: + { + p0 = cmp.eq(B,C) // yes, scalar equals. +0++0 or -0+-0 + if (p0.new) jumpr:t r31 + A = B + } + { + TMP = USR + } + { + TMP = extractu(TMP,#2,#SR_ROUND_OFF) + A = #0 + } + { + p0 = cmp.eq(TMP,#2) + if (p0.new) AH = ##0x80000000 + jumpr r31 + } +#undef BTMP +#undef BTMPH +#undef BTMPL +#define CTMP r11:10 + .falign +.Lfma_abnormal_c: + /* We know that AB is normal * normal */ + /* C is not normal: zero, subnormal, inf, or NaN. */ + { + p0 = dfclass(C,#0x10) // is C NaN? + if (p0.new) jump:nt .Lnan + if (p0.new) A = C // move NaN to A + deallocframe + } + { + p0 = dfclass(C,#0x08) // is C inf? + if (p0.new) A = C // return C + if (p0.new) jumpr:nt r31 + } + // zero or subnormal + // If we have a zero, and we know AB is normal*normal, we can just call normal multiply + { + p0 = dfclass(C,#0x01) // is C zero? 
+ if (p0.new) jump:nt __hexagon_muldf3 + TMP = #1 + } + // Left with: subnormal + // Adjust C and jump back to restart + { + allocframe(#STACKSPACE) // oops, deallocated above, re-allocate frame + CTMP = #0 + CH = insert(TMP,#EXPBITS,#HI_MANTBITS) + jump .Lfma_abnormal_c_restart + } +END(fma) diff --git a/lib/builtins/hexagon/dfminmax.S b/lib/builtins/hexagon/dfminmax.S new file mode 100644 index 000000000000..41122911f183 --- /dev/null +++ b/lib/builtins/hexagon/dfminmax.S @@ -0,0 +1,79 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#define A r1:0 +#define B r3:2 +#define ATMP r5:4 + + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG +#define END(TAG) .size TAG,.-TAG + +/* + * Min and Max return A if B is NaN, or B if A is NaN + * Otherwise, they return the smaller or bigger value + * + * If values are equal, we want to favor -0.0 for min and +0.0 for max. + */ + +/* + * Compares always return false for NaN + * if (isnan(A)) A = B; if (A > B) A = B will only trigger at most one of those options. 
+ */ + .text + .global __hexagon_mindf3 + .global __hexagon_maxdf3 + .global fmin + .type fmin,@function + .global fmax + .type fmax,@function + .type __hexagon_mindf3,@function + .type __hexagon_maxdf3,@function + Q6_ALIAS(mindf3) + Q6_ALIAS(maxdf3) + .p2align 5 +__hexagon_mindf3: +fmin: + { + p0 = dfclass(A,#0x10) // If A is a number + p1 = dfcmp.gt(A,B) // AND B > A, don't swap + ATMP = A + } + { + if (p0) A = B // if A is NaN use B + if (p1) A = B // gt is always false if either is NaN + p2 = dfcmp.eq(A,B) // if A == B + if (!p2.new) jumpr:t r31 + } + /* A == B, return A|B to select -0.0 over 0.0 */ + { + A = or(ATMP,B) + jumpr r31 + } +END(__hexagon_mindf3) + .falign +__hexagon_maxdf3: +fmax: + { + p0 = dfclass(A,#0x10) + p1 = dfcmp.gt(B,A) + ATMP = A + } + { + if (p0) A = B + if (p1) A = B + p2 = dfcmp.eq(A,B) + if (!p2.new) jumpr:t r31 + } + /* A == B, return A&B to select 0.0 over -0.0 */ + { + A = and(ATMP,B) + jumpr r31 + } +END(__hexagon_maxdf3) diff --git a/lib/builtins/hexagon/dfmul.S b/lib/builtins/hexagon/dfmul.S new file mode 100644 index 000000000000..fde6d77bdcff --- /dev/null +++ b/lib/builtins/hexagon/dfmul.S @@ -0,0 +1,418 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +/* Double Precision Multiply */ +#define A r1:0 +#define AH r1 +#define AL r0 +#define B r3:2 +#define BH r3 +#define BL r2 + +#define BTMP r5:4 +#define BTMPH r5 +#define BTMPL r4 + +#define PP_ODD r7:6 +#define PP_ODD_H r7 +#define PP_ODD_L r6 + +#define ONE r9:8 +#define S_ONE r8 +#define S_ZERO r9 + +#define PP_HH r11:10 +#define PP_HH_H r11 +#define PP_HH_L r10 + +#define ATMP r13:12 +#define ATMPH r13 +#define ATMPL r12 + +#define PP_LL r15:14 +#define PP_LL_H r15 +#define PP_LL_L r14 + +#define TMP r28 + +#define MANTBITS 52 +#define HI_MANTBITS 20 +#define EXPBITS 11 +#define BIAS 1024 +#define MANTISSA_TO_INT_BIAS 52 + +/* Some constant to adjust normalization amount in error code */ +/* Amount to right shift the partial product to get to a denorm */ +#define FUDGE 5 + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG +#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG +#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG +#define END(TAG) .size TAG,.-TAG + +#define SR_ROUND_OFF 22 + .text + .global __hexagon_muldf3 + .type __hexagon_muldf3,@function + Q6_ALIAS(muldf3) + FAST_ALIAS(muldf3) + FAST2_ALIAS(muldf3) + .p2align 5 +__hexagon_muldf3: + { + p0 = dfclass(A,#2) + p0 = dfclass(B,#2) + ATMP = combine(##0x40000000,#0) + } + { + ATMP = insert(A,#MANTBITS,#EXPBITS-1) + BTMP = asl(B,#EXPBITS-1) + TMP = #-BIAS + ONE = #1 + } + { + PP_ODD = mpyu(BTMPL,ATMPH) + BTMP = insert(ONE,#2,#62) + } + /* since we know that the MSB of the H registers is zero, we should never carry */ + /* H <= 2^31-1. L <= 2^32-1. Therefore, HL <= 2^63-2^32-2^31+1 */ + /* Adding 2 HLs, we get 2^64-3*2^32+2 maximum. */ + /* Therefore, we can add 3 2^32-1 values safely without carry. We only need one. 
*/ + { + PP_LL = mpyu(ATMPL,BTMPL) + PP_ODD += mpyu(ATMPL,BTMPH) + } + { + PP_ODD += lsr(PP_LL,#32) + PP_HH = mpyu(ATMPH,BTMPH) + BTMP = combine(##BIAS+BIAS-4,#0) + } + { + PP_HH += lsr(PP_ODD,#32) + if (!p0) jump .Lmul_abnormal + p1 = cmp.eq(PP_LL_L,#0) // 64 lsb's 0? + p1 = cmp.eq(PP_ODD_L,#0) // 64 lsb's 0? + } + /* + * PP_HH can have a maximum of 0x3FFF_FFFF_FFFF_FFFF or thereabouts + * PP_HH can have a minimum of 0x1000_0000_0000_0000 or so + */ +#undef PP_ODD +#undef PP_ODD_H +#undef PP_ODD_L +#define EXP10 r7:6 +#define EXP1 r7 +#define EXP0 r6 + { + if (!p1) PP_HH_L = or(PP_HH_L,S_ONE) + EXP0 = extractu(AH,#EXPBITS,#HI_MANTBITS) + EXP1 = extractu(BH,#EXPBITS,#HI_MANTBITS) + } + { + PP_LL = neg(PP_HH) + EXP0 += add(TMP,EXP1) + TMP = xor(AH,BH) + } + { + if (!p2.new) PP_HH = PP_LL + p2 = cmp.gt(TMP,#-1) + p0 = !cmp.gt(EXP0,BTMPH) + p0 = cmp.gt(EXP0,BTMPL) + if (!p0.new) jump:nt .Lmul_ovf_unf + } + { + A = convert_d2df(PP_HH) + EXP0 = add(EXP0,#-BIAS-58) + } + { + AH += asl(EXP0,#HI_MANTBITS) + jumpr r31 + } + + .falign +.Lpossible_unf: + /* We end up with a positive exponent */ + /* But we may have rounded up to an exponent of 1. 
*/ + /* If the exponent is 1, if we rounded up to it + * we need to also raise underflow + * Fortunately, this is pretty easy to detect, we must have +/- 0x0010_0000_0000_0000 + * And the PP should also have more than one bit set + */ + /* Note: ATMP should have abs(PP_HH) */ + /* Note: BTMPL should have 0x7FEFFFFF */ + { + p0 = cmp.eq(AL,#0) + p0 = bitsclr(AH,BTMPL) + if (!p0.new) jumpr:t r31 + BTMPH = #0x7fff + } + { + p0 = bitsset(ATMPH,BTMPH) + BTMPL = USR + BTMPH = #0x030 + } + { + if (p0) BTMPL = or(BTMPL,BTMPH) + } + { + USR = BTMPL + } + { + p0 = dfcmp.eq(A,A) + jumpr r31 + } + .falign +.Lmul_ovf_unf: + { + A = convert_d2df(PP_HH) + ATMP = abs(PP_HH) // take absolute value + EXP1 = add(EXP0,#-BIAS-58) + } + { + AH += asl(EXP1,#HI_MANTBITS) + EXP1 = extractu(AH,#EXPBITS,#HI_MANTBITS) + BTMPL = ##0x7FEFFFFF + } + { + EXP1 += add(EXP0,##-BIAS-58) + //BTMPH = add(clb(ATMP),#-2) + BTMPH = #0 + } + { + p0 = cmp.gt(EXP1,##BIAS+BIAS-2) // overflow + if (p0.new) jump:nt .Lmul_ovf + } + { + p0 = cmp.gt(EXP1,#0) + if (p0.new) jump:nt .Lpossible_unf + BTMPH = sub(EXP0,BTMPH) + TMP = #63 // max amount to shift + } + /* Underflow */ + /* + * PP_HH has the partial product with sticky LSB. + * PP_HH can have a maximum of 0x3FFF_FFFF_FFFF_FFFF or thereabouts + * PP_HH can have a minimum of 0x1000_0000_0000_0000 or so + * The exponent of PP_HH is in EXP1, which is non-positive (0 or negative) + * That's the exponent that happens after the normalization + * + * EXP0 has the exponent that, when added to the normalized value, is out of range. + * + * Strategy: + * + * * Shift down bits, with sticky bit, such that the bits are aligned according + * to the LZ count and appropriate exponent, but not all the way to mantissa + * field, keep around the last few bits. 
+ * * Put a 1 near the MSB + * * Check the LSBs for inexact; if inexact also set underflow + * * Convert [u]d2df -- will correctly round according to rounding mode + * * Replace exponent field with zero + * + * + */ + + + { + BTMPL = #0 // offset for extract + BTMPH = sub(#FUDGE,BTMPH) // amount to right shift + } + { + p3 = cmp.gt(PP_HH_H,#-1) // is it positive? + BTMPH = min(BTMPH,TMP) // Don't shift more than 63 + PP_HH = ATMP + } + { + TMP = USR + PP_LL = extractu(PP_HH,BTMP) + } + { + PP_HH = asr(PP_HH,BTMPH) + BTMPL = #0x0030 // underflow flag + AH = insert(S_ZERO,#EXPBITS,#HI_MANTBITS) + } + { + p0 = cmp.gtu(ONE,PP_LL) // Did we extract all zeros? + if (!p0.new) PP_HH_L = or(PP_HH_L,S_ONE) // add sticky bit + PP_HH_H = setbit(PP_HH_H,#HI_MANTBITS+3) // Add back in a bit so we can use convert instruction + } + { + PP_LL = neg(PP_HH) + p1 = bitsclr(PP_HH_L,#0x7) // Are the LSB's clear? + if (!p1.new) TMP = or(BTMPL,TMP) // If not, Inexact+Underflow + } + { + if (!p3) PP_HH = PP_LL + USR = TMP + } + { + A = convert_d2df(PP_HH) // Do rounding + p0 = dfcmp.eq(A,A) // realize exception + } + { + AH = insert(S_ZERO,#EXPBITS-1,#HI_MANTBITS+1) // Insert correct exponent + jumpr r31 + } + .falign +.Lmul_ovf: + // We get either max finite value or infinity. Either way, overflow+inexact + { + TMP = USR + ATMP = combine(##0x7fefffff,#-1) // positive max finite + A = PP_HH + } + { + PP_LL_L = extractu(TMP,#2,#SR_ROUND_OFF) // rounding bits + TMP = or(TMP,#0x28) // inexact + overflow + BTMP = combine(##0x7ff00000,#0) // positive infinity + } + { + USR = TMP + PP_LL_L ^= lsr(AH,#31) // Does sign match rounding? 
+ TMP = PP_LL_L // unmodified rounding mode + } + { + p0 = !cmp.eq(TMP,#1) // If not round-to-zero and + p0 = !cmp.eq(PP_LL_L,#2) // Not rounding the other way, + if (p0.new) ATMP = BTMP // we should get infinity + p0 = dfcmp.eq(A,A) // Realize FP exception if enabled + } + { + A = insert(ATMP,#63,#0) // insert inf/maxfinite, leave sign + jumpr r31 + } + +.Lmul_abnormal: + { + ATMP = extractu(A,#63,#0) // strip off sign + BTMP = extractu(B,#63,#0) // strip off sign + } + { + p3 = cmp.gtu(ATMP,BTMP) + if (!p3.new) A = B // sort values + if (!p3.new) B = A // sort values + } + { + // Any NaN --> NaN, possibly raise invalid if sNaN + p0 = dfclass(A,#0x0f) // A not NaN? + if (!p0.new) jump:nt .Linvalid_nan + if (!p3) ATMP = BTMP + if (!p3) BTMP = ATMP + } + { + // Infinity * nonzero number is infinity + p1 = dfclass(A,#0x08) // A is infinity + p1 = dfclass(B,#0x0e) // B is nonzero + } + { + // Infinity * zero --> NaN, raise invalid + // Other zeros return zero + p0 = dfclass(A,#0x08) // A is infinity + p0 = dfclass(B,#0x01) // B is zero + } + { + if (p1) jump .Ltrue_inf + p2 = dfclass(B,#0x01) + } + { + if (p0) jump .Linvalid_zeroinf + if (p2) jump .Ltrue_zero // so return zero + TMP = ##0x7c000000 + } + // We are left with a normal or subnormal times a subnormal. A > B + // If A and B are both very small (exp(a) < BIAS-MANTBITS), + // we go to a single sticky bit, which we can round easily. 
+ // If A and B might multiply to something bigger, decrease A exponent and increase + // B exponent and try again + { + p0 = bitsclr(AH,TMP) + if (p0.new) jump:nt .Lmul_tiny + } + { + TMP = cl0(BTMP) + } + { + TMP = add(TMP,#-EXPBITS) + } + { + BTMP = asl(BTMP,TMP) + } + { + B = insert(BTMP,#63,#0) + AH -= asl(TMP,#HI_MANTBITS) + } + jump __hexagon_muldf3 +.Lmul_tiny: + { + TMP = USR + A = xor(A,B) // get sign bit + } + { + TMP = or(TMP,#0x30) // Inexact + Underflow + A = insert(ONE,#63,#0) // put in rounded up value + BTMPH = extractu(TMP,#2,#SR_ROUND_OFF) // get rounding mode + } + { + USR = TMP + p0 = cmp.gt(BTMPH,#1) // Round towards pos/neg inf? + if (!p0.new) AL = #0 // If not, zero + BTMPH ^= lsr(AH,#31) // rounding my way --> set LSB + } + { + p0 = cmp.eq(BTMPH,#3) // if rounding towards right inf + if (!p0.new) AL = #0 // don't go to zero + jumpr r31 + } +.Linvalid_zeroinf: + { + TMP = USR + } + { + A = #-1 + TMP = or(TMP,#2) + } + { + USR = TMP + } + { + p0 = dfcmp.uo(A,A) // force exception if enabled + jumpr r31 + } +.Linvalid_nan: + { + p0 = dfclass(B,#0x0f) // if B is not NaN + TMP = convert_df2sf(A) // will generate invalid if sNaN + if (p0.new) B = A // make it whatever A is + } + { + BL = convert_df2sf(B) // will generate invalid if sNaN + A = #-1 + jumpr r31 + } + .falign +.Ltrue_zero: + { + A = B + B = A + } +.Ltrue_inf: + { + BH = extract(BH,#1,#31) + } + { + AH ^= asl(BH,#31) + jumpr r31 + } +END(__hexagon_muldf3) + +#undef ATMP +#undef ATMPL +#undef ATMPH +#undef BTMP +#undef BTMPL +#undef BTMPH diff --git a/lib/builtins/hexagon/dfsqrt.S b/lib/builtins/hexagon/dfsqrt.S new file mode 100644 index 000000000000..027d9e1fde43 --- /dev/null +++ b/lib/builtins/hexagon/dfsqrt.S @@ -0,0 +1,406 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +/* Double Precision square root */ + +#define EXP r28 + +#define A r1:0 +#define AH r1 +#define AL r0 + +#define SFSH r3:2 +#define SF_S r3 +#define SF_H r2 + +#define SFHALF_SONE r5:4 +#define S_ONE r4 +#define SFHALF r5 +#define SF_D r6 +#define SF_E r7 +#define RECIPEST r8 +#define SFRAD r9 + +#define FRACRAD r11:10 +#define FRACRADH r11 +#define FRACRADL r10 + +#define ROOT r13:12 +#define ROOTHI r13 +#define ROOTLO r12 + +#define PROD r15:14 +#define PRODHI r15 +#define PRODLO r14 + +#define P_TMP p0 +#define P_EXP1 p1 +#define NORMAL p2 + +#define SF_EXPBITS 8 +#define SF_MANTBITS 23 + +#define DF_EXPBITS 11 +#define DF_MANTBITS 52 + +#define DF_BIAS 0x3ff + +#define DFCLASS_ZERO 0x01 +#define DFCLASS_NORMAL 0x02 +#define DFCLASS_DENORMAL 0x02 +#define DFCLASS_INFINITE 0x08 +#define DFCLASS_NAN 0x10 + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG; .type __qdsp_##TAG,@function +#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG; .type __hexagon_fast_##TAG,@function +#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG; .type __hexagon_fast2_##TAG,@function +#define END(TAG) .size TAG,.-TAG + + .text + .global __hexagon_sqrtdf2 + .type __hexagon_sqrtdf2,@function + .global __hexagon_sqrt + .type __hexagon_sqrt,@function + Q6_ALIAS(sqrtdf2) + Q6_ALIAS(sqrt) + FAST_ALIAS(sqrtdf2) + FAST_ALIAS(sqrt) + FAST2_ALIAS(sqrtdf2) + FAST2_ALIAS(sqrt) + .type sqrt,@function + .p2align 5 +__hexagon_sqrtdf2: +__hexagon_sqrt: + { + PROD = extractu(A,#SF_MANTBITS+1,#DF_MANTBITS-SF_MANTBITS) + EXP = extractu(AH,#DF_EXPBITS,#DF_MANTBITS-32) + SFHALF_SONE = combine(##0x3f000004,#1) + } + { + NORMAL = dfclass(A,#DFCLASS_NORMAL) // Is it normal + NORMAL = cmp.gt(AH,#-1) // and positive? 
+ if (!NORMAL.new) jump:nt .Lsqrt_abnormal + SFRAD = or(SFHALF,PRODLO) + } +#undef NORMAL +.Ldenormal_restart: + { + FRACRAD = A + SF_E,P_TMP = sfinvsqrta(SFRAD) + SFHALF = and(SFHALF,#-16) + SFSH = #0 + } +#undef A +#undef AH +#undef AL +#define ERROR r1:0 +#define ERRORHI r1 +#define ERRORLO r0 + // SF_E : reciprocal square root + // SF_H : half rsqrt + // sf_S : square root + // SF_D : error term + // SFHALF: 0.5 + { + SF_S += sfmpy(SF_E,SFRAD):lib // s0: root + SF_H += sfmpy(SF_E,SFHALF):lib // h0: 0.5*y0. Could also decrement exponent... + SF_D = SFHALF +#undef SFRAD +#define SHIFTAMT r9 + SHIFTAMT = and(EXP,#1) + } + { + SF_D -= sfmpy(SF_S,SF_H):lib // d0: 0.5-H*S = 0.5-0.5*~1 + FRACRADH = insert(S_ONE,#DF_EXPBITS+1,#DF_MANTBITS-32) // replace upper bits with hidden + P_EXP1 = cmp.gtu(SHIFTAMT,#0) + } + { + SF_S += sfmpy(SF_S,SF_D):lib // s1: refine sqrt + SF_H += sfmpy(SF_H,SF_D):lib // h1: refine half-recip + SF_D = SFHALF + SHIFTAMT = mux(P_EXP1,#8,#9) + } + { + SF_D -= sfmpy(SF_S,SF_H):lib // d1: error term + FRACRAD = asl(FRACRAD,SHIFTAMT) // Move fracrad bits to right place + SHIFTAMT = mux(P_EXP1,#3,#2) + } + { + SF_H += sfmpy(SF_H,SF_D):lib // d2: rsqrt + // cool trick: half of 1/sqrt(x) has same mantissa as 1/sqrt(x). 
+ PROD = asl(FRACRAD,SHIFTAMT) // fracrad<<(2+exp1) + } + { + SF_H = and(SF_H,##0x007fffff) + } + { + SF_H = add(SF_H,##0x00800000 - 3) + SHIFTAMT = mux(P_EXP1,#7,#8) + } + { + RECIPEST = asl(SF_H,SHIFTAMT) + SHIFTAMT = mux(P_EXP1,#15-(1+1),#15-(1+0)) + } + { + ROOT = mpyu(RECIPEST,PRODHI) // root = mpyu_full(recipest,hi(fracrad<<(2+exp1))) + } + +#undef SFSH // r3:2 +#undef SF_H // r2 +#undef SF_S // r3 +#undef S_ONE // r4 +#undef SFHALF // r5 +#undef SFHALF_SONE // r5:4 +#undef SF_D // r6 +#undef SF_E // r7 + +#define HL r3:2 +#define LL r5:4 +#define HH r7:6 + +#undef P_EXP1 +#define P_CARRY0 p1 +#define P_CARRY1 p2 +#define P_CARRY2 p3 + + /* Iteration 0 */ + /* Maybe we can save a cycle by starting with ERROR=asl(fracrad), then as we multiply */ + /* We can shift and subtract instead of shift and add? */ + { + ERROR = asl(FRACRAD,#15) + PROD = mpyu(ROOTHI,ROOTHI) + P_CARRY0 = cmp.eq(r0,r0) + } + { + ERROR -= asl(PROD,#15) + PROD = mpyu(ROOTHI,ROOTLO) + P_CARRY1 = cmp.eq(r0,r0) + } + { + ERROR -= lsr(PROD,#16) + P_CARRY2 = cmp.eq(r0,r0) + } + { + ERROR = mpyu(ERRORHI,RECIPEST) + } + { + ROOT += lsr(ERROR,SHIFTAMT) + SHIFTAMT = add(SHIFTAMT,#16) + ERROR = asl(FRACRAD,#31) // for next iter + } + /* Iteration 1 */ + { + PROD = mpyu(ROOTHI,ROOTHI) + ERROR -= mpyu(ROOTHI,ROOTLO) // amount is 31, no shift needed + } + { + ERROR -= asl(PROD,#31) + PROD = mpyu(ROOTLO,ROOTLO) + } + { + ERROR -= lsr(PROD,#33) + } + { + ERROR = mpyu(ERRORHI,RECIPEST) + } + { + ROOT += lsr(ERROR,SHIFTAMT) + SHIFTAMT = add(SHIFTAMT,#16) + ERROR = asl(FRACRAD,#47) // for next iter + } + /* Iteration 2 */ + { + PROD = mpyu(ROOTHI,ROOTHI) + } + { + ERROR -= asl(PROD,#47) + PROD = mpyu(ROOTHI,ROOTLO) + } + { + ERROR -= asl(PROD,#16) // bidir shr 31-47 + PROD = mpyu(ROOTLO,ROOTLO) + } + { + ERROR -= lsr(PROD,#17) // 64-47 + } + { + ERROR = mpyu(ERRORHI,RECIPEST) + } + { + ROOT += lsr(ERROR,SHIFTAMT) + } +#undef ERROR +#undef PROD +#undef PRODHI +#undef PRODLO +#define REM_HI r15:14 +#define 
REM_HI_HI r15 +#define REM_LO r1:0 +#undef RECIPEST +#undef SHIFTAMT +#define TWOROOT_LO r9:8 + /* Adjust Root */ + { + HL = mpyu(ROOTHI,ROOTLO) + LL = mpyu(ROOTLO,ROOTLO) + REM_HI = #0 + REM_LO = #0 + } + { + HL += lsr(LL,#33) + LL += asl(HL,#33) + P_CARRY0 = cmp.eq(r0,r0) + } + { + HH = mpyu(ROOTHI,ROOTHI) + REM_LO = sub(REM_LO,LL,P_CARRY0):carry + TWOROOT_LO = #1 + } + { + HH += lsr(HL,#31) + TWOROOT_LO += asl(ROOT,#1) + } +#undef HL +#undef LL +#define REM_HI_TMP r3:2 +#define REM_HI_TMP_HI r3 +#define REM_LO_TMP r5:4 + { + REM_HI = sub(FRACRAD,HH,P_CARRY0):carry + REM_LO_TMP = sub(REM_LO,TWOROOT_LO,P_CARRY1):carry +#undef FRACRAD +#undef HH +#define ZERO r11:10 +#define ONE r7:6 + ONE = #1 + ZERO = #0 + } + { + REM_HI_TMP = sub(REM_HI,ZERO,P_CARRY1):carry + ONE = add(ROOT,ONE) + EXP = add(EXP,#-DF_BIAS) // subtract bias --> signed exp + } + { + // If carry set, no borrow: result was still positive + if (P_CARRY1) ROOT = ONE + if (P_CARRY1) REM_LO = REM_LO_TMP + if (P_CARRY1) REM_HI = REM_HI_TMP + } + { + REM_LO_TMP = sub(REM_LO,TWOROOT_LO,P_CARRY2):carry + ONE = #1 + EXP = asr(EXP,#1) // divide signed exp by 2 + } + { + REM_HI_TMP = sub(REM_HI,ZERO,P_CARRY2):carry + ONE = add(ROOT,ONE) + } + { + if (P_CARRY2) ROOT = ONE + if (P_CARRY2) REM_LO = REM_LO_TMP + // since tworoot <= 2^32, remhi must be zero +#undef REM_HI_TMP +#undef REM_HI_TMP_HI +#define S_ONE r2 +#define ADJ r3 + S_ONE = #1 + } + { + P_TMP = cmp.eq(REM_LO,ZERO) // is the low part zero + if (!P_TMP.new) ROOTLO = or(ROOTLO,S_ONE) // if so, it's exact... 
hopefully + ADJ = cl0(ROOT) + EXP = add(EXP,#-63) + } +#undef REM_LO +#define RET r1:0 +#define RETHI r1 + { + RET = convert_ud2df(ROOT) // set up mantissa, maybe set inexact flag + EXP = add(EXP,ADJ) // add back bias + } + { + RETHI += asl(EXP,#DF_MANTBITS-32) // add exponent adjust + jumpr r31 + } +#undef REM_LO_TMP +#undef REM_HI_TMP +#undef REM_HI_TMP_HI +#undef REM_LO +#undef REM_HI +#undef TWOROOT_LO + +#undef RET +#define A r1:0 +#define AH r1 +#define AL r1 +#undef S_ONE +#define TMP r3:2 +#define TMPHI r3 +#define TMPLO r2 +#undef P_CARRY0 +#define P_NEG p1 + + +#define SFHALF r5 +#define SFRAD r9 +.Lsqrt_abnormal: + { + P_TMP = dfclass(A,#DFCLASS_ZERO) // zero? + if (P_TMP.new) jumpr:t r31 + } + { + P_TMP = dfclass(A,#DFCLASS_NAN) + if (P_TMP.new) jump:nt .Lsqrt_nan + } + { + P_TMP = cmp.gt(AH,#-1) + if (!P_TMP.new) jump:nt .Lsqrt_invalid_neg + if (!P_TMP.new) EXP = ##0x7F800001 // sNaN + } + { + P_TMP = dfclass(A,#DFCLASS_INFINITE) + if (P_TMP.new) jumpr:nt r31 + } + // If we got here, we're denormal + // prepare to restart + { + A = extractu(A,#DF_MANTBITS,#0) // Extract mantissa + } + { + EXP = add(clb(A),#-DF_EXPBITS) // how much to normalize? 
+ } + { + A = asl(A,EXP) // Shift mantissa + EXP = sub(#1,EXP) // Form exponent + } + { + AH = insert(EXP,#1,#DF_MANTBITS-32) // insert lsb of exponent + } + { + TMP = extractu(A,#SF_MANTBITS+1,#DF_MANTBITS-SF_MANTBITS) // get sf value (mant+exp1) + SFHALF = ##0x3f000004 // form half constant + } + { + SFRAD = or(SFHALF,TMPLO) // form sf value + SFHALF = and(SFHALF,#-16) + jump .Ldenormal_restart // restart + } +.Lsqrt_nan: + { + EXP = convert_df2sf(A) // if sNaN, get invalid + A = #-1 // qNaN + jumpr r31 + } +.Lsqrt_invalid_neg: + { + A = convert_sf2df(EXP) // Invalid,NaNval + jumpr r31 + } +END(__hexagon_sqrt) +END(__hexagon_sqrtdf2) diff --git a/lib/builtins/hexagon/divdi3.S b/lib/builtins/hexagon/divdi3.S new file mode 100644 index 000000000000..49ee8104f305 --- /dev/null +++ b/lib/builtins/hexagon/divdi3.S @@ -0,0 +1,85 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . 
- \name + .endm + + +FUNCTION_BEGIN __hexagon_divdi3 + { + p2 = tstbit(r1,#31) + p3 = tstbit(r3,#31) + } + { + r1:0 = abs(r1:0) + r3:2 = abs(r3:2) + } + { + r6 = cl0(r1:0) // count leading 0's of dividend (numerator) + r7 = cl0(r3:2) // count leading 0's of divisor (denominator) + r5:4 = r3:2 // divisor moved into working registers + r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder + } + { + p3 = xor(p2,p3) + r10 = sub(r7,r6) // left shift count for bit & divisor + r1:0 = #0 // initialize quotient to 0 + r15:14 = #1 // initialize bit to 1 + } + { + r11 = add(r10,#1) // loop count is 1 more than shift count + r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb + r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor + } + { + p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend + loop0(1f,r11) // register loop + } + { + if (p0) jump .hexagon_divdi3_return // if divisor > dividend, we're done, so return + } + .falign +1: + { + p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder + } + { + r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder + r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8) + } + { + r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8) + r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6) + } + { + r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration + r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration + }:endloop0 + +.hexagon_divdi3_return: + { + r3:2 = neg(r1:0) + } + { + r1:0 = vmux(p3,r3:2,r1:0) + jumpr r31 + } +FUNCTION_END __hexagon_divdi3 + + .globl __qdsp_divdi3 + .set __qdsp_divdi3, __hexagon_divdi3 diff --git a/lib/builtins/hexagon/divsi3.S b/lib/builtins/hexagon/divsi3.S new file mode 100644 index 000000000000..8e159baa192f --- /dev/null +++ b/lib/builtins/hexagon/divsi3.S @@ -0,0 
+1,84 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + +FUNCTION_BEGIN __hexagon_divsi3 + { + p0 = cmp.ge(r0,#0) + p1 = cmp.ge(r1,#0) + r1 = abs(r0) + r2 = abs(r1) + } + { + r3 = cl0(r1) + r4 = cl0(r2) + r5 = sub(r1,r2) + p2 = cmp.gtu(r2,r1) + } +#if (__HEXAGON_ARCH__ == 60) + { + r0 = #0 + p1 = xor(p0,p1) + p0 = cmp.gtu(r2,r5) + } + if (p2) jumpr r31 +#else + { + r0 = #0 + p1 = xor(p0,p1) + p0 = cmp.gtu(r2,r5) + if (p2) jumpr r31 + } +#endif + { + r0 = mux(p1,#-1,#1) + if (p0) jumpr r31 + r4 = sub(r4,r3) + r3 = #1 + } + { + r0 = #0 + r3:2 = vlslw(r3:2,r4) + loop0(1f,r4) + } + .falign +1: + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r1 = sub(r1,r2) + if (!p0.new) r0 = add(r0,r3) + r3:2 = vlsrw(r3:2,#1) + }:endloop0 + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r0 = add(r0,r3) + if (!p1) jumpr r31 + } + { + r0 = neg(r0) + jumpr r31 + } +FUNCTION_END __hexagon_divsi3 + + .globl __qdsp_divsi3 + .set __qdsp_divsi3, __hexagon_divsi3 diff --git a/lib/builtins/hexagon/fabs_opt.S b/lib/builtins/hexagon/fabs_opt.S new file mode 100644 index 000000000000..b09b00734d98 --- /dev/null +++ b/lib/builtins/hexagon/fabs_opt.S @@ -0,0 +1,37 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +.macro FUNCTION_BEGIN name +.text +.p2align 5 +.globl \name +.type \name, @function +\name: +.endm + +.macro FUNCTION_END name +.size \name, . - \name +.endm + +FUNCTION_BEGIN fabs + { + r1 = clrbit(r1, #31) + jumpr r31 + } +FUNCTION_END fabs + +FUNCTION_BEGIN fabsf + { + r0 = clrbit(r0, #31) + jumpr r31 + } +FUNCTION_END fabsf + + .globl fabsl + .set fabsl, fabs diff --git a/lib/builtins/hexagon/fastmath2_dlib_asm.S b/lib/builtins/hexagon/fastmath2_dlib_asm.S new file mode 100644 index 000000000000..9286df06c26d --- /dev/null +++ b/lib/builtins/hexagon/fastmath2_dlib_asm.S @@ -0,0 +1,491 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/* ==================================================================== */ +/* FUNCTIONS Optimized double floating point operators */ +/* ==================================================================== */ +/* c = dadd_asm(a, b) */ +/* ==================================================================== * +fast2_QDOUBLE fast2_dadd(fast2_QDOUBLE a,fast2_QDOUBLE b) { + fast2_QDOUBLE c; + lint manta = a & MANTMASK; + int expa = Q6_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = Q6_R_sxth_R(b) ; + int exp, expdiff, j, k, hi, lo, cn; + lint mant; + + expdiff = (int) Q6_P_vabsdiffh_PP(a, b); + expdiff = Q6_R_sxth_R(expdiff) ; + if (expdiff > 63) { expdiff = 62;} + if (expa > expb) { + exp = expa + 1; + expa = 1; + expb = expdiff + 1; + } else { + exp = expb + 1; + expb = 1; + expa = expdiff + 1; + } + mant = (manta>>expa) + (mantb>>expb); + + hi = (int) (mant>>32); + lo = (int) (mant); + + k = Q6_R_normamt_R(hi); + if(hi == 0 || hi == -1) k = 
31+Q6_R_normamt_R(lo); + + mant = (mant << k); + cn = (mant == 0x8000000000000000LL); + exp = exp - k + cn; + + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global fast2_dadd_asm + .type fast2_dadd_asm, @function +fast2_dadd_asm: +#define manta R0 +#define mantexpa R1:0 +#define lmanta R1:0 +#define mantb R2 +#define mantexpb R3:2 +#define lmantb R3:2 +#define expa R4 +#define expb R5 +#define mantexpd R7:6 +#define expd R6 +#define exp R8 +#define c63 R9 +#define lmant R1:0 +#define manth R1 +#define mantl R0 +#define minmin R11:10 // exactly 0x000000000000008001LL +#define minminl R10 +#define k R4 +#define ce P0 + .falign + { + mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 0x08001LL + c63 = #62 + expa = SXTH(manta) + expb = SXTH(mantb) + } { + expd = SXTH(expd) + ce = CMP.GT(expa, expb); + if ( ce.new) exp = add(expa, #1) + if (!ce.new) exp = add(expb, #1) + } { + if ( ce) expa = #1 + if (!ce) expb = #1 + manta.L = #0 + expd = MIN(expd, c63) + } { + if (!ce) expa = add(expd, #1) + if ( ce) expb = add(expd, #1) + mantb.L = #0 + minmin = #0 + } { + lmanta = ASR(lmanta, expa) + lmantb = ASR(lmantb, expb) + } { + lmant = add(lmanta, lmantb) + minminl.L = #0x8001 + } { + k = clb(lmant) + c63 = #58 + } { + k = add(k, #-1) + p0 = cmp.gt(k, c63) + } { + mantexpa = ASL(lmant, k) + exp = SUB(exp, k) + if(p0) jump .Ldenorma + } { + manta = insert(exp, #16, #0) + jumpr r31 + } +.Ldenorma: + { + mantexpa = minmin + jumpr r31 + } +/* =================================================================== * + fast2_QDOUBLE fast2_dsub(fast2_QDOUBLE a,fast2_QDOUBLE b) { + fast2_QDOUBLE c; + lint manta = a & MANTMASK; + int expa = Q6_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = Q6_R_sxth_R(b) ; + int exp, expdiff, j, k; + lint mant; + + expdiff = (int) Q6_P_vabsdiffh_PP(a, b); + expdiff = Q6_R_sxth_R(expdiff) 
; + if (expdiff > 63) { expdiff = 62;} + if (expa > expb) { + exp = expa + 1; + expa = 1; + expb = expdiff + 1; + } else { + exp = expb + 1; + expb = 1; + expa = expdiff + 1; + } + mant = (manta>>expa) - (mantb>>expb); + k = Q6_R_clb_P(mant)-1; + mant = (mant << k); + exp = exp - k; + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global fast2_dsub_asm + .type fast2_dsub_asm, @function +fast2_dsub_asm: + +#define manta R0 +#define mantexpa R1:0 +#define lmanta R1:0 +#define mantb R2 +#define mantexpb R3:2 +#define lmantb R3:2 +#define expa R4 +#define expb R5 +#define mantexpd R7:6 +#define expd R6 +#define exp R8 +#define c63 R9 +#define lmant R1:0 +#define manth R1 +#define mantl R0 +#define minmin R11:10 // exactly 0x000000000000008001LL +#define minminl R10 +#define k R4 +#define ce P0 + .falign + { + mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 0x08001LL + c63 = #62 + expa = SXTH(manta) + expb = SXTH(mantb) + } { + expd = SXTH(expd) + ce = CMP.GT(expa, expb); + if ( ce.new) exp = add(expa, #1) + if (!ce.new) exp = add(expb, #1) + } { + if ( ce) expa = #1 + if (!ce) expb = #1 + manta.L = #0 + expd = MIN(expd, c63) + } { + if (!ce) expa = add(expd, #1) + if ( ce) expb = add(expd, #1) + mantb.L = #0 + minmin = #0 + } { + lmanta = ASR(lmanta, expa) + lmantb = ASR(lmantb, expb) + } { + lmant = sub(lmanta, lmantb) + minminl.L = #0x8001 + } { + k = clb(lmant) + c63 = #58 + } { + k = add(k, #-1) + p0 = cmp.gt(k, c63) + } { + mantexpa = ASL(lmant, k) + exp = SUB(exp, k) + if(p0) jump .Ldenorm + } { + manta = insert(exp, #16, #0) + jumpr r31 + } +.Ldenorm: + { + mantexpa = minmin + jumpr r31 + } +/* ==================================================================== * + fast2_QDOUBLE fast2_dmpy(fast2_QDOUBLE a,fast2_QDOUBLE b) { + fast2_QDOUBLE c; + lint manta = a & MANTMASK; + int expa = Q6_R_sxth_R(a) ; 
+ lint mantb = b & MANTMASK; + int expb = Q6_R_sxth_R(b) ; + int exp, k; + lint mant; + int hia, hib, hi, lo; + unsigned int loa, lob; + + hia = (int)(a >> 32); + loa = Q6_R_extractu_RII((int)manta, 31, 1); + hib = (int)(b >> 32); + lob = Q6_R_extractu_RII((int)mantb, 31, 1); + + mant = Q6_P_mpy_RR(hia, lob); + mant = Q6_P_mpyacc_RR(mant,hib, loa); + mant = (mant >> 30) + (Q6_P_mpy_RR(hia, hib)<<1); + + hi = (int) (mant>>32); + + k = Q6_R_normamt_R(hi); + mant = mant << k; + exp = expa + expb - k; + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global fast2_dmpy_asm + .type fast2_dmpy_asm, @function +fast2_dmpy_asm: + +#define mantal R0 +#define mantah R1 +#define mantexpa R1:0 +#define mantbl R2 +#define mantbh R3 +#define mantexpb R3:2 +#define expa R4 +#define expb R5 +#define c8001 R12 +#define mantexpd R7:6 +#define mantdh R7 +#define exp R8 +#define lmantc R11:10 +#define kb R9 +#define guard R11 +#define mantal_ R12 +#define mantbl_ R13 +#define min R15:14 +#define minh R15 + + .falign + { + mantbl_= lsr(mantbl, #16) + expb = sxth(mantbl) + expa = sxth(mantal) + mantal_= lsr(mantal, #16) + } + { + lmantc = mpy(mantah, mantbh) + mantexpd = mpy(mantah, mantbl_) + mantal.L = #0x0 + min = #0 + } + { + lmantc = add(lmantc, lmantc) + mantexpd+= mpy(mantbh, mantal_) + mantbl.L = #0x0 + minh.H = #0x8000 + } + { + mantexpd = asr(mantexpd, #15) + c8001.L = #0x8001 + p1 = cmp.eq(mantexpa, mantexpb) + } + { + mantexpd = add(mantexpd, lmantc) + exp = add(expa, expb) + p2 = cmp.eq(mantexpa, min) + } + { + kb = clb(mantexpd) + mantexpb = abs(mantexpd) + guard = #58 + } + { + p1 = and(p1, p2) + exp = sub(exp, kb) + kb = add(kb, #-1) + p0 = cmp.gt(kb, guard) + } + { + exp = add(exp, #1) + mantexpa = asl(mantexpd, kb) + if(p1) jump .Lsat //rarely happens + } + { + mantal = insert(exp,#16, #0) + if(!p0) jumpr r31 + } + { + 
mantal = insert(c8001,#16, #0) + jumpr r31 + } +.Lsat: + { + mantexpa = #-1 + } + { + mantexpa = lsr(mantexpa, #1) + } + { + mantal = insert(exp,#16, #0) + jumpr r31 + } + +/* ==================================================================== * + int fast2_qd2f(fast2_QDOUBLE a) { + int exp; + long long int manta; + int ic, rnd, mantb; + + manta = a>>32; + exp = Q6_R_sxth_R(a) ; + ic = 0x80000000 & manta; + manta = Q6_R_abs_R_sat(manta); + mantb = (manta + rnd)>>7; + rnd = 0x40 + exp = (exp + 126); + if((manta & 0xff) == rnd) rnd = 0x00; + if((manta & 0x7fffffc0) == 0x7fffffc0) { + manta = 0x0; exp++; + } else { + manta= mantb & 0x007fffff; + } + exp = (exp << 23) & 0x7fffffc0; + ic = Q6_R_addacc_RR(ic, exp, manta); + return (ic); + } + * ==================================================================== */ + + .text + .global fast2_qd2f_asm + .type fast2_qd2f_asm, @function +fast2_qd2f_asm: +#define mantah R1 +#define mantal R0 +#define cff R0 +#define mant R3 +#define expo R4 +#define rnd R5 +#define mask R6 +#define c07f R7 +#define c80 R0 +#define mantb R2 +#define ic R0 + + .falign + { + mant = abs(mantah):sat + expo = sxth(mantal) + rnd = #0x40 + mask.L = #0xffc0 + } + { + cff = extractu(mant, #8, #0) + p2 = cmp.gt(expo, #126) + p3 = cmp.ge(expo, #-126) + mask.H = #0x7fff + } + { + p1 = cmp.eq(cff,#0x40) + if(p1.new) rnd = #0 + expo = add(expo, #126) + if(!p3) jump .Lmin + } + { + p0 = bitsset(mant, mask) + c80.L = #0x0000 + mantb = add(mant, rnd) + c07f = lsr(mask, #8) + } + { + if(p0) expo = add(expo, #1) + if(p0) mant = #0 + mantb = lsr(mantb, #7) + c80.H = #0x8000 + } + { + ic = and(c80, mantah) + mask &= asl(expo, #23) + if(!p0) mant = and(mantb, c07f) + if(p2) jump .Lmax + } + { + ic += add(mask, mant) + jumpr r31 + } +.Lmax: + { + ic.L = #0xffff; + } + { + ic.H = #0x7f7f; + jumpr r31 + } +.Lmin: + { + ic = #0x0 + jumpr r31 + } + +/* ==================================================================== * +fast2_QDOUBLE fast2_f2qd(int ia) { + lint exp; 
+ lint mant; + fast2_QDOUBLE c; + + mant = ((ia << 7) | 0x40000000)&0x7fffff80 ; + if (ia & 0x80000000) mant = -mant; + exp = ((ia >> 23) & 0xFFLL) - 126; + c = (mant<<32) | Q6_R_zxth_R(exp);; + return(c); +} + * ==================================================================== */ + .text + .global fast2_f2qd_asm + .type fast2_f2qd_asm, @function +fast2_f2qd_asm: +#define ia R0 +#define mag R3 +#define mantr R1 +#define expr R0 +#define zero R2 +#define maxneg R5:4 +#define maxnegl R4 + .falign + { + mantr = asl(ia, #7) + p0 = tstbit(ia, #31) + maxneg = #0 + mag = add(ia,ia) + } + { + mantr = setbit(mantr, #30) + expr= extractu(ia,#8,#23) + maxnegl.L = #0x8001 + p1 = cmp.eq(mag, #0) + } + { + mantr= extractu(mantr, #31, #0) + expr= add(expr, #-126) + zero = #0 + if(p1) jump .Lminqd + } + { + expr = zxth(expr) + if(p0) mantr= sub(zero, mantr) + jumpr r31 + } +.Lminqd: + { + R1:0 = maxneg + jumpr r31 + } diff --git a/lib/builtins/hexagon/fastmath2_ldlib_asm.S b/lib/builtins/hexagon/fastmath2_ldlib_asm.S new file mode 100644 index 000000000000..4192555351a6 --- /dev/null +++ b/lib/builtins/hexagon/fastmath2_ldlib_asm.S @@ -0,0 +1,345 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +/* ==================================================================== * + +fast2_QLDOUBLE fast2_ldadd(fast2_QLDOUBLE a,fast2_QLDOUBLE b) { + fast2_QLDOUBLE c; + lint manta = a & MANTMASK; + int expa = Q6_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = Q6_R_sxth_R(b) ; + int exp, expdiff, j, k, hi, lo, cn; + lint mant; + + expdiff = (int) Q6_P_vabsdiffh_PP(a, b); + expdiff = Q6_R_sxth_R(expdiff) ; + if (expdiff > 63) { expdiff = 62;} + if (expa > expb) { + exp = expa + 1; + expa = 1; + expb = expdiff + 1; + } else { + exp = expb + 1; + expb = 1; + expa = expdiff + 1; + } + mant = (manta>>expa) + (mantb>>expb); + + hi = (int) (mant>>32); + lo = (int) (mant); + + k = Q6_R_normamt_R(hi); + if(hi == 0 || hi == -1) k = 31+Q6_R_normamt_R(lo); + + mant = (mant << k); + cn = (mant == 0x8000000000000000LL); + exp = exp - k + cn; + + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global fast2_ldadd_asm + .type fast2_ldadd_asm, @function +fast2_ldadd_asm: +#define manta R1:0 +#define lmanta R1:0 +#define mantb R3:2 +#define lmantb R3:2 +#define expa R4 +#define expb R5 +#define expd R6 +#define exp R8 +#define c63 R9 +#define lmant R1:0 +#define k R4 +#define ce P0 +#define zero R3:2 + .falign + { + expa = memw(r29+#8) + expb = memw(r29+#24) + r7 = r0 + } + { + expd = sub(expa, expb):sat + ce = CMP.GT(expa, expb); + if ( ce.new) exp = add(expa, #1) + if (!ce.new) exp = add(expb, #1) + } { + expd = abs(expd):sat + if ( ce) expa = #1 + if (!ce) expb = #1 + c63 = #62 + } { + expd = MIN(expd, c63) + manta = memd(r29+#0) + mantb = memd(r29+#16) + } { + if (!ce) expa = add(expd, #1) + if ( ce) expb = add(expd, #1) + } { + lmanta = ASR(lmanta, expa) + lmantb = ASR(lmantb, expb) + } { + lmant = add(lmanta, lmantb) + zero = #0 + } { + k = 
clb(lmant) + c63.L =#0x0001 + } { + exp -= add(k, #-1) //exp = exp - (k-1) + k = add(k, #-1) + p0 = cmp.gt(k, #58) + c63.H =#0x8000 + } { + if(!p0)memw(r7+#8) = exp + lmant = ASL(lmant, k) + if(p0) jump .Ldenorma + } { + memd(r7+#0) = lmant + jumpr r31 + } +.Ldenorma: + memd(r7+#0) = zero + { + memw(r7+#8) = c63 + jumpr r31 + } +/* =================================================================== * + fast2_QLDOUBLE fast2_ldsub(fast2_QLDOUBLE a,fast2_QLDOUBLE b) { + fast2_QLDOUBLE c; + lint manta = a & MANTMASK; + int expa = Q6_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = Q6_R_sxth_R(b) ; + int exp, expdiff, j, k; + lint mant; + + expdiff = (int) Q6_P_vabsdiffh_PP(a, b); + expdiff = Q6_R_sxth_R(expdiff) ; + if (expdiff > 63) { expdiff = 62;} + if (expa > expb) { + exp = expa + 1; + expa = 1; + expb = expdiff + 1; + } else { + exp = expb + 1; + expb = 1; + expa = expdiff + 1; + } + mant = (manta>>expa) - (mantb>>expb); + k = Q6_R_clb_P(mant)-1; + mant = (mant << k); + exp = exp - k; + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global fast2_ldsub_asm + .type fast2_ldsub_asm, @function +fast2_ldsub_asm: +#define manta R1:0 +#define lmanta R1:0 +#define mantb R3:2 +#define lmantb R3:2 +#define expa R4 +#define expb R5 +#define expd R6 +#define exp R8 +#define c63 R9 +#define lmant R1:0 +#define k R4 +#define ce P0 +#define zero R3:2 + .falign + { + expa = memw(r29+#8) + expb = memw(r29+#24) + r7 = r0 + } + { + expd = sub(expa, expb):sat + ce = CMP.GT(expa, expb); + if ( ce.new) exp = add(expa, #1) + if (!ce.new) exp = add(expb, #1) + } { + expd = abs(expd):sat + if ( ce) expa = #1 + if (!ce) expb = #1 + c63 = #62 + } { + expd = min(expd, c63) + manta = memd(r29+#0) + mantb = memd(r29+#16) + } { + if (!ce) expa = add(expd, #1) + if ( ce) expb = add(expd, #1) + } { + lmanta = ASR(lmanta, expa) + lmantb = 
ASR(lmantb, expb) + } { + lmant = sub(lmanta, lmantb) + zero = #0 + } { + k = clb(lmant) + c63.L =#0x0001 + } { + exp -= add(k, #-1) //exp = exp - (k+1) + k = add(k, #-1) + p0 = cmp.gt(k, #58) + c63.H =#0x8000 + } { + if(!p0)memw(r7+#8) = exp + lmant = asl(lmant, k) + if(p0) jump .Ldenorma_s + } { + memd(r7+#0) = lmant + jumpr r31 + } +.Ldenorma_s: + memd(r7+#0) = zero + { + memw(r7+#8) = c63 + jumpr r31 + } + +/* ==================================================================== * + fast2_QLDOUBLE fast2_ldmpy(fast2_QLDOUBLE a,fast2_QLDOUBLE b) { + fast2_QLDOUBLE c; + lint manta = a & MANTMASK; + int expa = Q6_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = Q6_R_sxth_R(b) ; + int exp, k; + lint mant; + int hia, hib, hi, lo; + unsigned int loa, lob; + + hia = (int)(a >> 32); + loa = Q6_R_extractu_RII((int)manta, 31, 1); + hib = (int)(b >> 32); + lob = Q6_R_extractu_RII((int)mantb, 31, 1); + + mant = Q6_P_mpy_RR(hia, lob); + mant = Q6_P_mpyacc_RR(mant,hib, loa); + mant = (mant >> 30) + (Q6_P_mpy_RR(hia, hib)<<1); + + hi = (int) (mant>>32); + + k = Q6_R_normamt_R(hi); + mant = mant << k; + exp = expa + expb - k; + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global fast2_ldmpy_asm + .type fast2_ldmpy_asm, @function +fast2_ldmpy_asm: + +#define mantxl_ R9 +#define mantxl R14 +#define mantxh R15 +#define mantx R15:14 +#define mantbl R2 +#define mantbl_ R8 +#define mantbh R3 +#define mantb R3:2 +#define expa R4 +#define expb R5 +#define c8001 R8 +#define mantd R7:6 +#define lmantc R11:10 +#define kp R9 +#define min R13:12 +#define minh R13 +#define max R13:12 +#define maxh R13 +#define ret R0 + + .falign + { + mantx = memd(r29+#0) + mantb = memd(r29+#16) + min = #0 + } + { + mantbl_= extractu(mantbl, #31, #1) + mantxl_= extractu(mantxl, #31, #1) + minh.H = #0x8000 + } + { + lmantc = mpy(mantxh, mantbh) + 
mantd = mpy(mantxh, mantbl_) + expa = memw(r29+#8) + expb = memw(r29+#24) + } + { + lmantc = add(lmantc, lmantc) + mantd += mpy(mantbh, mantxl_) + } + { + mantd = asr(mantd, #30) + c8001.L = #0x0001 + p1 = cmp.eq(mantx, mantb) + } + { + mantd = add(mantd, lmantc) + expa= add(expa, expb) + p2 = cmp.eq(mantb, min) + } + { + kp = clb(mantd) + c8001.H = #0x8000 + p1 = and(p1, p2) + } + { + expa-= add(kp, #-1) + kp = add(kp, #-1) + if(p1) jump .Lsat + } + { + mantd = asl(mantd, kp) + memw(ret+#8) = expa + p0 = cmp.gt(kp, #58) + if(p0.new) jump:NT .Ldenorm //rarely happens + } + { + memd(ret+#0) = mantd + jumpr r31 + } +.Lsat: + { + max = #0 + expa+= add(kp, #1) + } + { + maxh.H = #0x4000 + memw(ret+#8) = expa + } + { + memd(ret+#0) = max + jumpr r31 + } +.Ldenorm: + { + memw(ret+#8) = c8001 + mantx = #0 + } + { + memd(ret+#0) = mantx + jumpr r31 + } diff --git a/lib/builtins/hexagon/fastmath_dlib_asm.S b/lib/builtins/hexagon/fastmath_dlib_asm.S new file mode 100644 index 000000000000..215936b783ca --- /dev/null +++ b/lib/builtins/hexagon/fastmath_dlib_asm.S @@ -0,0 +1,400 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +/* ==================================================================== */ +/* FUNCTIONS Optimized double floating point operators */ +/* ==================================================================== */ +/* c = dadd_asm(a, b) */ +/* ==================================================================== + +QDOUBLE dadd(QDOUBLE a,QDOUBLE b) { + QDOUBLE c; + lint manta = a & MANTMASK; + int expa = HEXAGON_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = HEXAGON_R_sxth_R(b) ; + int exp, expdiff, j, k, hi, lo, cn; + lint mant; + + expdiff = (int) HEXAGON_P_vabsdiffh_PP(a, b); + expdiff = HEXAGON_R_sxth_R(expdiff) ; + if (expdiff > 63) { expdiff = 62;} + if (expa > expb) { + exp = expa + 1; + expa = 1; + expb = expdiff + 1; + } else { + exp = expb + 1; + expb = 1; + expa = expdiff + 1; + } + mant = (manta>>expa) + (mantb>>expb); + + hi = (int) (mant>>32); + lo = (int) (mant); + + k = HEXAGON_R_normamt_R(hi); + if(hi == 0 || hi == -1) k = 31+HEXAGON_R_normamt_R(lo); + + mant = (mant << k); + cn = (mant == 0x8000000000000000LL); + exp = exp - k + cn; + + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global dadd_asm + .type dadd_asm, @function +dadd_asm: + +#define manta R0 +#define mantexpa R1:0 +#define lmanta R1:0 +#define mantb R2 +#define mantexpb R3:2 +#define lmantb R3:2 +#define expa R4 +#define expb R5 +#define mantexpd R7:6 +#define expd R6 +#define exp R8 +#define c63 R9 +#define lmant R1:0 +#define manth R1 +#define mantl R0 +#define zero R7:6 +#define zerol R6 +#define minus R3:2 +#define minusl R2 +#define maxneg R9 +#define minmin R11:10 // exactly 0x800000000000000000LL +#define minminh R11 +#define k R4 +#define kl R5 +#define ce P0 + .falign + { + mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 
0x08001LL + c63 = #62 + expa = SXTH(manta) + expb = SXTH(mantb) + } { + expd = SXTH(expd) + ce = CMP.GT(expa, expb); + if ( ce.new) exp = add(expa, #1) + if (!ce.new) exp = add(expb, #1) + } { + if ( ce) expa = #1 + if (!ce) expb = #1 + manta.L = #0 + expd = MIN(expd, c63) + } { + if (!ce) expa = add(expd, #1) + if ( ce) expb = add(expd, #1) + mantb.L = #0 + zero = #0 + } { + lmanta = ASR(lmanta, expa) + lmantb = ASR(lmantb, expb) + minmin = #0 + } { + lmant = add(lmanta, lmantb) + minus = #-1 + minminh.H = #0x8000 + } { + k = NORMAMT(manth) + kl = NORMAMT(mantl) + p0 = cmp.eq(manth, zerol) + p1 = cmp.eq(manth, minusl) + } { + p0 = OR(p0, p1) + if(p0.new) k = add(kl, #31) + maxneg.H = #0 + } { + mantexpa = ASL(lmant, k) + exp = SUB(exp, k) + maxneg.L = #0x8001 + } { + p0 = cmp.eq(mantexpa, zero) + p1 = cmp.eq(mantexpa, minus) + manta.L = #0 + exp = ZXTH(exp) + } { + p2 = cmp.eq(mantexpa, minmin) //is result 0x80....0 + if(p2.new) exp = add(exp, #1) + } +#if (__HEXAGON_ARCH__ == 60) + { + p0 = OR(p0, p1) + if( p0.new) manta = OR(manta,maxneg) + if(!p0.new) manta = OR(manta,exp) + } + jumpr r31 +#else + { + p0 = OR(p0, p1) + if( p0.new) manta = OR(manta,maxneg) + if(!p0.new) manta = OR(manta,exp) + jumpr r31 + } +#endif +/* =================================================================== * + QDOUBLE dsub(QDOUBLE a,QDOUBLE b) { + QDOUBLE c; + lint manta = a & MANTMASK; + int expa = HEXAGON_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = HEXAGON_R_sxth_R(b) ; + int exp, expdiff, j, k, hi, lo, cn; + lint mant; + + expdiff = (int) HEXAGON_P_vabsdiffh_PP(a, b); + expdiff = HEXAGON_R_sxth_R(expdiff) ; + if (expdiff > 63) { expdiff = 62;} + if (expa > expb) { + exp = expa + 1; + expa = 1; + expb = expdiff + 1; + } else { + exp = expb + 1; + expb = 1; + expa = expdiff + 1; + } + mant = (manta>>expa) - (mantb>>expb); + + hi = (int) (mant>>32); + lo = (int) (mant); + + k = HEXAGON_R_normamt_R(hi); + if(hi == 0 || hi == -1) k = 31+HEXAGON_R_normamt_R(lo); + + mant = 
(mant << k); + cn = (mant == 0x8000000000000000LL); + exp = exp - k + cn; + + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global dsub_asm + .type dsub_asm, @function +dsub_asm: + +#define manta R0 +#define mantexpa R1:0 +#define lmanta R1:0 +#define mantb R2 +#define mantexpb R3:2 +#define lmantb R3:2 +#define expa R4 +#define expb R5 +#define mantexpd R7:6 +#define expd R6 +#define exp R8 +#define c63 R9 +#define lmant R1:0 +#define manth R1 +#define mantl R0 +#define zero R7:6 +#define zerol R6 +#define minus R3:2 +#define minusl R2 +#define maxneg R9 +#define minmin R11:10 // exactly 0x800000000000000000LL +#define minminh R11 +#define k R4 +#define kl R5 +#define ce P0 + .falign + { + mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 0x08001LL + c63 = #62 + expa = SXTH(manta) + expb = SXTH(mantb) + } { + expd = SXTH(expd) + ce = CMP.GT(expa, expb); + if ( ce.new) exp = add(expa, #1) + if (!ce.new) exp = add(expb, #1) + } { + if ( ce) expa = #1 + if (!ce) expb = #1 + manta.L = #0 + expd = MIN(expd, c63) + } { + if (!ce) expa = add(expd, #1) + if ( ce) expb = add(expd, #1) + mantb.L = #0 + zero = #0 + } { + lmanta = ASR(lmanta, expa) + lmantb = ASR(lmantb, expb) + minmin = #0 + } { + lmant = sub(lmanta, lmantb) + minus = #-1 + minminh.H = #0x8000 + } { + k = NORMAMT(manth) + kl = NORMAMT(mantl) + p0 = cmp.eq(manth, zerol) + p1 = cmp.eq(manth, minusl) + } { + p0 = OR(p0, p1) + if(p0.new) k = add(kl, #31) + maxneg.H = #0 + } { + mantexpa = ASL(lmant, k) + exp = SUB(exp, k) + maxneg.L = #0x8001 + } { + p0 = cmp.eq(mantexpa, zero) + p1 = cmp.eq(mantexpa, minus) + manta.L = #0 + exp = ZXTH(exp) + } { + p2 = cmp.eq(mantexpa, minmin) //is result 0x80....0 + if(p2.new) exp = add(exp, #1) + } +#if (__HEXAGON_ARCH__ == 60) + { + p0 = OR(p0, p1) + if( p0.new) manta = OR(manta,maxneg) + if(!p0.new) manta = 
OR(manta,exp) + } + jumpr r31 +#else + { + p0 = OR(p0, p1) + if( p0.new) manta = OR(manta,maxneg) + if(!p0.new) manta = OR(manta,exp) + jumpr r31 + } +#endif +/* ==================================================================== * + QDOUBLE dmpy(QDOUBLE a,QDOUBLE b) { + QDOUBLE c; + lint manta = a & MANTMASK; + int expa = HEXAGON_R_sxth_R(a) ; + lint mantb = b & MANTMASK; + int expb = HEXAGON_R_sxth_R(b) ; + int exp, k; + lint mant; + int hia, hib, hi, lo; + unsigned int loa, lob; + + hia = (int)(a >> 32); + loa = HEXAGON_R_extractu_RII((int)manta, 31, 1); + hib = (int)(b >> 32); + lob = HEXAGON_R_extractu_RII((int)mantb, 31, 1); + + mant = HEXAGON_P_mpy_RR(hia, lob); + mant = HEXAGON_P_mpyacc_RR(mant,hib, loa); + mant = (mant >> 30) + (HEXAGON_P_mpy_RR(hia, hib)<<1); + + hi = (int) (mant>>32); + lo = (int) (mant); + + k = HEXAGON_R_normamt_R(hi); + if(hi == 0 || hi == -1) k = 31+HEXAGON_R_normamt_R(lo); + mant = mant << k; + exp = expa + expb - k; + if (mant == 0 || mant == -1) exp = 0x8001; + c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK); + return(c); + } + * ==================================================================== */ + .text + .global dmpy_asm + .type dmpy_asm, @function +dmpy_asm: + +#define mantal R0 +#define mantah R1 +#define mantexpa R1:0 +#define mantbl R2 +#define mantbh R3 +#define mantexpb R3:2 +#define expa R4 +#define expb R5 +#define mantexpd R7:6 +#define exp R8 +#define lmantc R11:10 +#define mantch R11 +#define mantcl R10 +#define zero0 R7:6 +#define zero0l R6 +#define minus1 R3:2 +#define minus1l R2 +#define maxneg R9 +#define k R4 +#define kl R5 + + .falign + { + mantbl = lsr(mantbl, #16) + mantal = lsr(mantal, #16) + expa = sxth(mantal) + expb = sxth(mantbl) + } + { + lmantc = mpy(mantah, mantbh) + mantexpd = mpy(mantah, mantbl) + } + { + lmantc = add(lmantc, lmantc) //<<1 + mantexpd+= mpy(mantbh, mantal) + } + { + lmantc += asr(mantexpd, #15) + exp = add(expa, expb) + zero0 = #0 + minus1 = #-1 + } + { + k = normamt(mantch) + 
kl = normamt(mantcl) + p0 = cmp.eq(mantch, zero0l) + p1 = cmp.eq(mantch, minus1l) + } + { + p0 = or(p0, p1) + if(p0.new) k = add(kl, #31) + maxneg.H = #0 + } + { + mantexpa = asl(lmantc, k) + exp = sub(exp, k) + maxneg.L = #0x8001 + } + { + p0 = cmp.eq(mantexpa, zero0) + p1 = cmp.eq(mantexpa, minus1) + mantal.L = #0 + exp = zxth(exp) + } +#if (__HEXAGON_ARCH__ == 60) + { + p0 = or(p0, p1) + if( p0.new) mantal = or(mantal,maxneg) + if(!p0.new) mantal = or(mantal,exp) + } + jumpr r31 +#else + { + p0 = or(p0, p1) + if( p0.new) mantal = or(mantal,maxneg) + if(!p0.new) mantal = or(mantal,exp) + jumpr r31 + } +#endif diff --git a/lib/builtins/hexagon/fma_opt.S b/lib/builtins/hexagon/fma_opt.S new file mode 100644 index 000000000000..12378f0da04e --- /dev/null +++ b/lib/builtins/hexagon/fma_opt.S @@ -0,0 +1,31 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +.macro FUNCTION_BEGIN name +.text +.p2align 5 +.globl \name +.type \name, @function +\name: +.endm + +.macro FUNCTION_END name +.size \name, . - \name +.endm + +FUNCTION_BEGIN fmaf + r2 += sfmpy(r0, r1) + { + r0 = r2 + jumpr r31 + } +FUNCTION_END fmaf + + .globl fmal + .set fmal, fma diff --git a/lib/builtins/hexagon/fmax_opt.S b/lib/builtins/hexagon/fmax_opt.S new file mode 100644 index 000000000000..f3a218c9769b --- /dev/null +++ b/lib/builtins/hexagon/fmax_opt.S @@ -0,0 +1,30 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +.macro FUNCTION_BEGIN name +.text +.p2align 5 +.globl \name +.type \name, @function +\name: +.endm + +.macro FUNCTION_END name +.size \name, . - \name +.endm + +FUNCTION_BEGIN fmaxf + { + r0 = sfmax(r0, r1) + jumpr r31 + } +FUNCTION_END fmaxf + + .globl fmaxl + .set fmaxl, fmax diff --git a/lib/builtins/hexagon/fmin_opt.S b/lib/builtins/hexagon/fmin_opt.S new file mode 100644 index 000000000000..ef9b0ff854a2 --- /dev/null +++ b/lib/builtins/hexagon/fmin_opt.S @@ -0,0 +1,30 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +.macro FUNCTION_BEGIN name +.text +.p2align 5 +.globl \name +.type \name, @function +\name: +.endm + +.macro FUNCTION_END name +.size \name, . - \name +.endm + +FUNCTION_BEGIN fminf + { + r0 = sfmin(r0, r1) + jumpr r31 + } +FUNCTION_END fminf + + .globl fminl + .set fminl, fmin diff --git a/lib/builtins/hexagon/memcpy_forward_vp4cp4n2.S b/lib/builtins/hexagon/memcpy_forward_vp4cp4n2.S new file mode 100644 index 000000000000..fbe09086cd33 --- /dev/null +++ b/lib/builtins/hexagon/memcpy_forward_vp4cp4n2.S @@ -0,0 +1,125 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// An optimized version of a memcpy which is equivalent to the following loop: +// +// volatile unsigned *dest; +// unsigned *src; +// +// for (i = 0; i < num_words; ++i) +// *dest++ = *src++; +// +// The corresponding C prototype for this function would be +// void hexagon_memcpy_forward_vp4cp4n2(volatile unsigned *dest, +// const unsigned *src, +// unsigned num_words); +// +// *** Both dest and src must be aligned to 32-bit boundaries. *** +// The code does not perform any runtime checks for this, and will fail +// in bad ways if this requirement is not met. +// +// The "forward" in the name refers to the fact that the function copies +// the words going forward in memory. It is incorrect to use this function +// for cases where the original code copied words in any other order. +// +// *** This function is only for the use by the compiler. *** +// The only indended use is for the LLVM compiler to generate calls to +// this function, when a mem-copy loop, like the one above, is detected. + + .text + +// Inputs: +// r0: dest +// r1: src +// r2: num_words + + .globl hexagon_memcpy_forward_vp4cp4n2 + .balign 32 + .type hexagon_memcpy_forward_vp4cp4n2,@function +hexagon_memcpy_forward_vp4cp4n2: + + // Compute r3 to be the number of words remaining in the current page. + // At the same time, compute r4 to be the number of 32-byte blocks + // remaining in the page (for prefetch). + { + r3 = sub(##4096, r1) + r5 = lsr(r2, #3) + } + { + // The word count before end-of-page is in the 12 lowest bits of r3. + // (If the address in r1 was already page-aligned, the bits are 0.) 
+ r3 = extractu(r3, #10, #2) + r4 = extractu(r3, #7, #5) + } + { + r3 = minu(r2, r3) + r4 = minu(r5, r4) + } + { + r4 = or(r4, ##2105344) // 2105344 = 0x202000 + p0 = cmp.eq(r3, #0) + if (p0.new) jump:nt .Lskipprolog + } + l2fetch(r1, r4) + { + loop0(.Lprolog, r3) + r2 = sub(r2, r3) // r2 = number of words left after the prolog. + } + .falign +.Lprolog: + { + r4 = memw(r1++#4) + memw(r0++#4) = r4.new + } :endloop0 +.Lskipprolog: + { + // Let r3 = number of whole pages left (page = 1024 words). + r3 = lsr(r2, #10) + if (cmp.eq(r3.new, #0)) jump:nt .Lskipmain + } + { + loop1(.Lout, r3) + r2 = extractu(r2, #10, #0) // r2 = r2 & 1023 + r3 = ##2105472 // r3 = 0x202080 (prefetch info) + } + // Iterate over pages. + .falign +.Lout: + // Prefetch each individual page. + l2fetch(r1, r3) + loop0(.Lpage, #512) + .falign +.Lpage: + r5:4 = memd(r1++#8) + { + memw(r0++#8) = r4 + memw(r0+#4) = r5 + } :endloop0:endloop1 +.Lskipmain: + { + r3 = ##2105344 // r3 = 0x202000 (prefetch info) + r4 = lsr(r2, #3) // r4 = number of 32-byte blocks remaining. + p0 = cmp.eq(r2, #0) + if (p0.new) jumpr:nt r31 + } + { + r3 = or(r3, r4) + loop0(.Lepilog, r2) + } + l2fetch(r1, r3) + .falign +.Lepilog: + { + r4 = memw(r1++#4) + memw(r0++#4) = r4.new + } :endloop0 + + jumpr r31 + +.size hexagon_memcpy_forward_vp4cp4n2, . - hexagon_memcpy_forward_vp4cp4n2 diff --git a/lib/builtins/hexagon/memcpy_likely_aligned.S b/lib/builtins/hexagon/memcpy_likely_aligned.S new file mode 100644 index 000000000000..bbc85c22db08 --- /dev/null +++ b/lib/builtins/hexagon/memcpy_likely_aligned.S @@ -0,0 +1,64 @@ +//===------------------------- memcopy routines ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + +FUNCTION_BEGIN __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes + { + p0 = bitsclr(r1,#7) + p0 = bitsclr(r0,#7) + if (p0.new) r5:4 = memd(r1) + r3 = #-3 + } + { + if (!p0) jump .Lmemcpy_call + if (p0) memd(r0++#8) = r5:4 + if (p0) r5:4 = memd(r1+#8) + r3 += lsr(r2,#3) + } + { + memd(r0++#8) = r5:4 + r5:4 = memd(r1+#16) + r1 = add(r1,#24) + loop0(1f,r3) + } + .falign +1: + { + memd(r0++#8) = r5:4 + r5:4 = memd(r1++#8) + }:endloop0 + { + memd(r0) = r5:4 + r0 -= add(r2,#-8) + jumpr r31 + } +FUNCTION_END __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes + +.Lmemcpy_call: +#ifdef __PIC__ + jump memcpy@PLT +#else + jump memcpy +#endif + + .globl __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes + .set __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes, \ + __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes diff --git a/lib/builtins/hexagon/moddi3.S b/lib/builtins/hexagon/moddi3.S new file mode 100644 index 000000000000..12a0595fe465 --- /dev/null +++ b/lib/builtins/hexagon/moddi3.S @@ -0,0 +1,83 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . 
- \name + .endm + + +FUNCTION_BEGIN __hexagon_moddi3 + { + p3 = tstbit(r1,#31) + } + { + r1:0 = abs(r1:0) + r3:2 = abs(r3:2) + } + { + r6 = cl0(r1:0) // count leading 0's of dividend (numerator) + r7 = cl0(r3:2) // count leading 0's of divisor (denominator) + r5:4 = r3:2 // divisor moved into working registers + r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder + } + { + r10 = sub(r7,r6) // left shift count for bit & divisor + r1:0 = #0 // initialize quotient to 0 + r15:14 = #1 // initialize bit to 1 + } + { + r11 = add(r10,#1) // loop count is 1 more than shift count + r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb + r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor + } + { + p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend + loop0(1f,r11) // register loop + } + { + if (p0) jump .hexagon_moddi3_return // if divisor > dividend, we're done, so return + } + .falign +1: + { + p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder + } + { + r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder + r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8) + } + { + r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8) + r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6) + } + { + r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration + r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration + }:endloop0 + +.hexagon_moddi3_return: + { + r1:0 = neg(r3:2) + } + { + r1:0 = vmux(p3,r1:0,r3:2) + jumpr r31 + } +FUNCTION_END __hexagon_moddi3 + + .globl __qdsp_moddi3 + .set __qdsp_moddi3, __hexagon_moddi3 diff --git a/lib/builtins/hexagon/modsi3.S b/lib/builtins/hexagon/modsi3.S new file mode 100644 index 000000000000..5afda9e2978b --- /dev/null +++ b/lib/builtins/hexagon/modsi3.S @@ -0,0 +1,66 @@ 
+//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + +FUNCTION_BEGIN __hexagon_modsi3 + { + p2 = cmp.ge(r0,#0) + r2 = abs(r0) + r1 = abs(r1) + } + { + r3 = cl0(r2) + r4 = cl0(r1) + p0 = cmp.gtu(r1,r2) + } + { + r3 = sub(r4,r3) + if (p0) jumpr r31 + } + { + p1 = cmp.eq(r3,#0) + loop0(1f,r3) + r0 = r2 + r2 = lsl(r1,r3) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + if (p2) jumpr r31 + } + { + r0 = neg(r0) + jumpr r31 + } +FUNCTION_END __hexagon_modsi3 + + .globl __qdsp_modsi3 + .set __qdsp_modsi3, __hexagon_modsi3 diff --git a/lib/builtins/hexagon/sfdiv_opt.S b/lib/builtins/hexagon/sfdiv_opt.S new file mode 100644 index 000000000000..6bdd4808c2b8 --- /dev/null +++ b/lib/builtins/hexagon/sfdiv_opt.S @@ -0,0 +1,66 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . 
- \name + .endm + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG +#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG +#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG + +FUNCTION_BEGIN __hexagon_divsf3 + { + r2,p0 = sfrecipa(r0,r1) + r4 = sffixupd(r0,r1) + r3 = ##0x3f800000 // 1.0 + } + { + r5 = sffixupn(r0,r1) + r3 -= sfmpy(r4,r2):lib // 1-(den/recip) yields error? + r6 = ##0x80000000 + r7 = r3 + } + { + r2 += sfmpy(r3,r2):lib + r3 = r7 + r6 = r5 + r0 = and(r6,r5) + } + { + r3 -= sfmpy(r4,r2):lib + r0 += sfmpy(r5,r2):lib + } + { + r2 += sfmpy(r3,r2):lib + r6 -= sfmpy(r0,r4):lib + } + { + r0 += sfmpy(r6,r2):lib + } + { + r5 -= sfmpy(r0,r4):lib + } + { + r0 += sfmpy(r5,r2,p0):scale + jumpr r31 + } +FUNCTION_END __hexagon_divsf3 + +Q6_ALIAS(divsf3) +FAST_ALIAS(divsf3) +FAST2_ALIAS(divsf3) diff --git a/lib/builtins/hexagon/sfsqrt_opt.S b/lib/builtins/hexagon/sfsqrt_opt.S new file mode 100644 index 000000000000..7f619002774f --- /dev/null +++ b/lib/builtins/hexagon/sfsqrt_opt.S @@ -0,0 +1,82 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . 
- \name + .endm + +#define RIN r0 +#define S r0 +#define H r1 +#define D r2 +#define E r3 +#define HALF r4 +#define R r5 + +#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG +#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG +#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG + +FUNCTION_BEGIN __hexagon_sqrtf + { + E,p0 = sfinvsqrta(RIN) + R = sffixupr(RIN) + HALF = ##0x3f000000 // 0.5 + r1:0 = combine(#0,#0) // clear S/H + } + { + S += sfmpy(E,R):lib // S0 + H += sfmpy(E,HALF):lib // H0 + D = HALF + E = R + } + { + D -= sfmpy(S,H):lib // d0 + p1 = sfclass(R,#1) // is zero? + //E -= sfmpy(S,S):lib // e0 + } + { + S += sfmpy(S,D):lib // S1 + H += sfmpy(H,D):lib // H1 + D = HALF + E = R + } + { + D -= sfmpy(S,H):lib // d0 + E -= sfmpy(S,S):lib // e0 + } + { + S += sfmpy(H,E):lib // S2 + H += sfmpy(H,D):lib // H2 + D = HALF + E = R + } + { + //D -= sfmpy(S,H):lib // d2 + E -= sfmpy(S,S):lib // e2 + if (p1) r0 = or(r0,R) // sqrt(-0.0) = -0.0 + } + { + S += sfmpy(H,E,p0):scale // S3 + jumpr r31 + } + +FUNCTION_END __hexagon_sqrtf + +Q6_ALIAS(sqrtf) +FAST_ALIAS(sqrtf) +FAST2_ALIAS(sqrtf) diff --git a/lib/builtins/hexagon/udivdi3.S b/lib/builtins/hexagon/udivdi3.S new file mode 100644 index 000000000000..1ca326b75208 --- /dev/null +++ b/lib/builtins/hexagon/udivdi3.S @@ -0,0 +1,71 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . 
- \name + .endm + + +FUNCTION_BEGIN __hexagon_udivdi3 + { + r6 = cl0(r1:0) // count leading 0's of dividend (numerator) + r7 = cl0(r3:2) // count leading 0's of divisor (denominator) + r5:4 = r3:2 // divisor moved into working registers + r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder + } + { + r10 = sub(r7,r6) // left shift count for bit & divisor + r1:0 = #0 // initialize quotient to 0 + r15:14 = #1 // initialize bit to 1 + } + { + r11 = add(r10,#1) // loop count is 1 more than shift count + r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb + r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor + } + { + p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend + loop0(1f,r11) // register loop + } + { + if (p0) jumpr r31 // if divisor > dividend, we're done, so return + } + .falign +1: + { + p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder + } + { + r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder + r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8) + } + { + r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8) + r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6) + } + { + r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration + r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration + }:endloop0 + { + jumpr r31 // return + } +FUNCTION_END __hexagon_udivdi3 + + .globl __qdsp_udivdi3 + .set __qdsp_udivdi3, __hexagon_udivdi3 diff --git a/lib/builtins/hexagon/udivmoddi4.S b/lib/builtins/hexagon/udivmoddi4.S new file mode 100644 index 000000000000..deb5aae0924d --- /dev/null +++ b/lib/builtins/hexagon/udivmoddi4.S @@ -0,0 +1,71 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the 
University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + +FUNCTION_BEGIN __hexagon_udivmoddi4 + { + r6 = cl0(r1:0) // count leading 0's of dividend (numerator) + r7 = cl0(r3:2) // count leading 0's of divisor (denominator) + r5:4 = r3:2 // divisor moved into working registers + r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder + } + { + r10 = sub(r7,r6) // left shift count for bit & divisor + r1:0 = #0 // initialize quotient to 0 + r15:14 = #1 // initialize bit to 1 + } + { + r11 = add(r10,#1) // loop count is 1 more than shift count + r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb + r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor + } + { + p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend + loop0(1f,r11) // register loop + } + { + if (p0) jumpr r31 // if divisor > dividend, we're done, so return + } + .falign +1: + { + p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder + } + { + r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder + r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8) + } + { + r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8) + r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6) + } + { + r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration + r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration + }:endloop0 + { + jumpr r31 // return + } +FUNCTION_END __hexagon_udivmoddi4 + + .globl __qdsp_udivmoddi4 + .set __qdsp_udivmoddi4, __hexagon_udivmoddi4 diff --git a/lib/builtins/hexagon/udivmodsi4.S 
b/lib/builtins/hexagon/udivmodsi4.S new file mode 100644 index 000000000000..25bbe7cd5925 --- /dev/null +++ b/lib/builtins/hexagon/udivmodsi4.S @@ -0,0 +1,60 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + +FUNCTION_BEGIN __hexagon_udivmodsi4 + { + r2 = cl0(r0) + r3 = cl0(r1) + r5:4 = combine(#1,#0) + p0 = cmp.gtu(r1,r0) + } + { + r6 = sub(r3,r2) + r4 = r1 + r1:0 = combine(r0,r4) + if (p0) jumpr r31 + } + { + r3:2 = vlslw(r5:4,r6) + loop0(1f,r6) + p0 = cmp.eq(r6,#0) + if (p0.new) r4 = #0 + } + .falign +1: + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r1 = sub(r1,r2) + if (!p0.new) r0 = add(r0,r3) + r3:2 = vlsrw(r3:2,#1) + }:endloop0 + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r1 = sub(r1,r4) + if (!p0.new) r0 = add(r0,r3) + jumpr r31 + } +FUNCTION_END __hexagon_udivmodsi4 + + .globl __qdsp_udivmodsi4 + .set __qdsp_udivmodsi4, __hexagon_udivmodsi4 diff --git a/lib/builtins/hexagon/udivsi3.S b/lib/builtins/hexagon/udivsi3.S new file mode 100644 index 000000000000..54f0aa409f93 --- /dev/null +++ b/lib/builtins/hexagon/udivsi3.S @@ -0,0 +1,56 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + +FUNCTION_BEGIN __hexagon_udivsi3 + { + r2 = cl0(r0) + r3 = cl0(r1) + r5:4 = combine(#1,#0) + p0 = cmp.gtu(r1,r0) + } + { + r6 = sub(r3,r2) + r4 = r1 + r1:0 = combine(r0,r4) + if (p0) jumpr r31 + } + { + r3:2 = vlslw(r5:4,r6) + loop0(1f,r6) + } + .falign +1: + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r1 = sub(r1,r2) + if (!p0.new) r0 = add(r0,r3) + r3:2 = vlsrw(r3:2,#1) + }:endloop0 + { + p0 = cmp.gtu(r2,r1) + if (!p0.new) r0 = add(r0,r3) + jumpr r31 + } +FUNCTION_END __hexagon_udivsi3 + + .globl __qdsp_udivsi3 + .set __qdsp_udivsi3, __hexagon_udivsi3 diff --git a/lib/builtins/hexagon/umoddi3.S b/lib/builtins/hexagon/umoddi3.S new file mode 100644 index 000000000000..f091521414af --- /dev/null +++ b/lib/builtins/hexagon/umoddi3.S @@ -0,0 +1,74 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . 
- \name + .endm + + +FUNCTION_BEGIN __hexagon_umoddi3 + { + r6 = cl0(r1:0) // count leading 0's of dividend (numerator) + r7 = cl0(r3:2) // count leading 0's of divisor (denominator) + r5:4 = r3:2 // divisor moved into working registers + r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder + } + { + r10 = sub(r7,r6) // left shift count for bit & divisor + r1:0 = #0 // initialize quotient to 0 + r15:14 = #1 // initialize bit to 1 + } + { + r11 = add(r10,#1) // loop count is 1 more than shift count + r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb + r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor + } + { + p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend + loop0(1f,r11) // register loop + } + { + if (p0) jump .hexagon_umoddi3_return // if divisor > dividend, we're done, so return + } + .falign +1: + { + p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder + } + { + r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder + r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8) + } + { + r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8) + r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6) + } + { + r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration + r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration + }:endloop0 + +.hexagon_umoddi3_return: + { + r1:0 = r3:2 + jumpr r31 + } +FUNCTION_END __hexagon_umoddi3 + + .globl __qdsp_umoddi3 + .set __qdsp_umoddi3, __hexagon_umoddi3 diff --git a/lib/builtins/hexagon/umodsi3.S b/lib/builtins/hexagon/umodsi3.S new file mode 100644 index 000000000000..a8270c2030d5 --- /dev/null +++ b/lib/builtins/hexagon/umodsi3.S @@ -0,0 +1,55 @@ +//===----------------------Hexagon builtin routine ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file 
is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + + + .macro FUNCTION_BEGIN name + .text + .p2align 5 + .globl \name + .type \name, @function +\name: + .endm + + .macro FUNCTION_END name + .size \name, . - \name + .endm + + +FUNCTION_BEGIN __hexagon_umodsi3 + { + r2 = cl0(r0) + r3 = cl0(r1) + p0 = cmp.gtu(r1,r0) + } + { + r2 = sub(r3,r2) + if (p0) jumpr r31 + } + { + loop0(1f,r2) + p1 = cmp.eq(r2,#0) + r2 = lsl(r1,r2) + } + .falign +1: + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r2) + r2 = lsr(r2,#1) + if (p1) r1 = #0 + }:endloop0 + { + p0 = cmp.gtu(r2,r0) + if (!p0.new) r0 = sub(r0,r1) + jumpr r31 + } +FUNCTION_END __hexagon_umodsi3 + + .globl __qdsp_umodsi3 + .set __qdsp_umodsi3, __hexagon_umodsi3 diff --git a/lib/builtins/int_types.h b/lib/builtins/int_types.h index a92238c5b730..f53f343d35d5 100644 --- a/lib/builtins/int_types.h +++ b/lib/builtins/int_types.h @@ -60,7 +60,7 @@ typedef union }s; } udwords; -#if (defined(__LP64__) || defined(__wasm__) || defined(__mips64)) +#if (defined(__LP64__) || defined(__wasm__) || defined(__mips64)) || defined(__riscv) #define CRT_HAS_128BIT #endif diff --git a/lib/builtins/os_version_check.c b/lib/builtins/os_version_check.c index 74ade2f5b966..772e33333c0f 100644 --- a/lib/builtins/os_version_check.c +++ b/lib/builtins/os_version_check.c @@ -16,8 +16,8 @@ #ifdef __APPLE__ #include <CoreFoundation/CoreFoundation.h> -#include <dispatch/dispatch.h> #include <TargetConditionals.h> +#include <dispatch/dispatch.h> #include <dlfcn.h> #include <stdint.h> #include <stdio.h> @@ -28,6 +28,26 @@ static int32_t GlobalMajor, GlobalMinor, GlobalSubminor; static dispatch_once_t DispatchOnceCounter; +typedef CFDataRef (*CFDataCreateWithBytesNoCopyFuncTy)(CFAllocatorRef, + const UInt8 *, CFIndex, + CFAllocatorRef); +typedef CFPropertyListRef 
(*CFPropertyListCreateWithDataFuncTy)( + CFAllocatorRef, CFDataRef, CFOptionFlags, CFPropertyListFormat *, + CFErrorRef *); +typedef CFPropertyListRef (*CFPropertyListCreateFromXMLDataFuncTy)( + CFAllocatorRef, CFDataRef, CFOptionFlags, CFStringRef *); +typedef CFStringRef (*CFStringCreateWithCStringNoCopyFuncTy)(CFAllocatorRef, + const char *, + CFStringEncoding, + CFAllocatorRef); +typedef const void *(*CFDictionaryGetValueFuncTy)(CFDictionaryRef, + const void *); +typedef CFTypeID (*CFGetTypeIDFuncTy)(CFTypeRef); +typedef CFTypeID (*CFStringGetTypeIDFuncTy)(void); +typedef Boolean (*CFStringGetCStringFuncTy)(CFStringRef, char *, CFIndex, + CFStringEncoding); +typedef void (*CFReleaseFuncTy)(CFTypeRef); + /* Find and parse the SystemVersion.plist file. */ static void parseSystemVersionPList(void *Unused) { (void)Unused; @@ -37,50 +57,49 @@ static void parseSystemVersionPList(void *Unused) { return; const CFAllocatorRef kCFAllocatorNull = *(const CFAllocatorRef *)NullAllocator; - typeof(CFDataCreateWithBytesNoCopy) *CFDataCreateWithBytesNoCopyFunc = - (typeof(CFDataCreateWithBytesNoCopy) *)dlsym( - RTLD_DEFAULT, "CFDataCreateWithBytesNoCopy"); + CFDataCreateWithBytesNoCopyFuncTy CFDataCreateWithBytesNoCopyFunc = + (CFDataCreateWithBytesNoCopyFuncTy)dlsym(RTLD_DEFAULT, + "CFDataCreateWithBytesNoCopy"); if (!CFDataCreateWithBytesNoCopyFunc) return; - typeof(CFPropertyListCreateWithData) *CFPropertyListCreateWithDataFunc = - (typeof(CFPropertyListCreateWithData) *)dlsym( + CFPropertyListCreateWithDataFuncTy CFPropertyListCreateWithDataFunc = + (CFPropertyListCreateWithDataFuncTy)dlsym( RTLD_DEFAULT, "CFPropertyListCreateWithData"); - /* CFPropertyListCreateWithData was introduced only in macOS 10.6+, so it - * will be NULL on earlier OS versions. */ +/* CFPropertyListCreateWithData was introduced only in macOS 10.6+, so it + * will be NULL on earlier OS versions. 
*/ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" - typeof(CFPropertyListCreateFromXMLData) *CFPropertyListCreateFromXMLDataFunc = - (typeof(CFPropertyListCreateFromXMLData) *)dlsym( + CFPropertyListCreateFromXMLDataFuncTy CFPropertyListCreateFromXMLDataFunc = + (CFPropertyListCreateFromXMLDataFuncTy)dlsym( RTLD_DEFAULT, "CFPropertyListCreateFromXMLData"); #pragma clang diagnostic pop /* CFPropertyListCreateFromXMLDataFunc is deprecated in macOS 10.10, so it * might be NULL in future OS versions. */ if (!CFPropertyListCreateWithDataFunc && !CFPropertyListCreateFromXMLDataFunc) return; - typeof(CFStringCreateWithCStringNoCopy) *CFStringCreateWithCStringNoCopyFunc = - (typeof(CFStringCreateWithCStringNoCopy) *)dlsym( + CFStringCreateWithCStringNoCopyFuncTy CFStringCreateWithCStringNoCopyFunc = + (CFStringCreateWithCStringNoCopyFuncTy)dlsym( RTLD_DEFAULT, "CFStringCreateWithCStringNoCopy"); if (!CFStringCreateWithCStringNoCopyFunc) return; - typeof(CFDictionaryGetValue) *CFDictionaryGetValueFunc = - (typeof(CFDictionaryGetValue) *)dlsym(RTLD_DEFAULT, - "CFDictionaryGetValue"); + CFDictionaryGetValueFuncTy CFDictionaryGetValueFunc = + (CFDictionaryGetValueFuncTy)dlsym(RTLD_DEFAULT, "CFDictionaryGetValue"); if (!CFDictionaryGetValueFunc) return; - typeof(CFGetTypeID) *CFGetTypeIDFunc = - (typeof(CFGetTypeID) *)dlsym(RTLD_DEFAULT, "CFGetTypeID"); + CFGetTypeIDFuncTy CFGetTypeIDFunc = + (CFGetTypeIDFuncTy)dlsym(RTLD_DEFAULT, "CFGetTypeID"); if (!CFGetTypeIDFunc) return; - typeof(CFStringGetTypeID) *CFStringGetTypeIDFunc = - (typeof(CFStringGetTypeID) *)dlsym(RTLD_DEFAULT, "CFStringGetTypeID"); + CFStringGetTypeIDFuncTy CFStringGetTypeIDFunc = + (CFStringGetTypeIDFuncTy)dlsym(RTLD_DEFAULT, "CFStringGetTypeID"); if (!CFStringGetTypeIDFunc) return; - typeof(CFStringGetCString) *CFStringGetCStringFunc = - (typeof(CFStringGetCString) *)dlsym(RTLD_DEFAULT, "CFStringGetCString"); + CFStringGetCStringFuncTy CFStringGetCStringFunc = + 
(CFStringGetCStringFuncTy)dlsym(RTLD_DEFAULT, "CFStringGetCString"); if (!CFStringGetCStringFunc) return; - typeof(CFRelease) *CFReleaseFunc = - (typeof(CFRelease) *)dlsym(RTLD_DEFAULT, "CFRelease"); + CFReleaseFuncTy CFReleaseFunc = + (CFReleaseFuncTy)dlsym(RTLD_DEFAULT, "CFRelease"); if (!CFReleaseFunc) return; @@ -163,10 +182,14 @@ int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) { /* Populate the global version variables, if they haven't already. */ dispatch_once_f(&DispatchOnceCounter, NULL, parseSystemVersionPList); - if (Major < GlobalMajor) return 1; - if (Major > GlobalMajor) return 0; - if (Minor < GlobalMinor) return 1; - if (Minor > GlobalMinor) return 0; + if (Major < GlobalMajor) + return 1; + if (Major > GlobalMajor) + return 0; + if (Minor < GlobalMinor) + return 1; + if (Minor > GlobalMinor) + return 0; return Subminor <= GlobalSubminor; } diff --git a/lib/builtins/riscv/mulsi3.S b/lib/builtins/riscv/mulsi3.S new file mode 100644 index 000000000000..a58d237040b6 --- /dev/null +++ b/lib/builtins/riscv/mulsi3.S @@ -0,0 +1,28 @@ +//===--- mulsi3.S - Integer multiplication routines routines ---===// +// +// The LLVM Compiler Infrastructure +// +// This file is dual licensed under the MIT and the University of Illinois Open +// Source Licenses. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#if !defined(__riscv_mul) && __riscv_xlen == 32 + .text + .align 2 + + .globl __mulsi3 + .type __mulsi3, @function +__mulsi3: + mv a2, a0 + mv a0, zero +.L1: + andi a3, a1, 1 + beqz a3, .L2 + add a0, a0, a2 +.L2: + srli a1, a1, 1 + slli a2, a2, 1 + bnez a1, .L1 + ret +#endif diff --git a/lib/cfi/CMakeLists.txt b/lib/cfi/CMakeLists.txt index 6c531445626a..7ed72bca5d23 100644 --- a/lib/cfi/CMakeLists.txt +++ b/lib/cfi/CMakeLists.txt @@ -30,6 +30,8 @@ if(OS_NAME MATCHES "Linux") OBJECT_LIBS RTInterception RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer RTUbsan CFLAGS ${CFI_CFLAGS} ${CFI_DIAG_CFLAGS} PARENT_TARGET cfi) diff --git a/lib/cfi/cfi.cc b/lib/cfi/cfi.cc index f7693b37d29c..a2f127f93cda 100644 --- a/lib/cfi/cfi.cc +++ b/lib/cfi/cfi.cc @@ -132,7 +132,11 @@ void ShadowBuilder::Start() { void ShadowBuilder::AddUnchecked(uptr begin, uptr end) { uint16_t *shadow_begin = MemToShadow(begin, shadow_); uint16_t *shadow_end = MemToShadow(end - 1, shadow_) + 1; - memset(shadow_begin, kUncheckedShadow, + // memset takes a byte, so our unchecked shadow value requires both bytes to + // be the same. Make sure we're ok during compilation. + static_assert((kUncheckedShadow & 0xff) == ((kUncheckedShadow >> 8) & 0xff), + "Both bytes of the 16-bit value must be the same!"); + memset(shadow_begin, kUncheckedShadow & 0xff, (shadow_end - shadow_begin) * sizeof(*shadow_begin)); } @@ -379,6 +383,8 @@ __cfi_slowpath_diag(u64 CallSiteTypeId, void *Ptr, void *DiagData) { } #endif +static void EnsureInterceptorsInitialized(); + // Setup shadow for dlopen()ed libraries. 
// The actual shadow setup happens after dlopen() returns, which means that // a library can not be a target of any CFI checks while its constructors are @@ -388,6 +394,7 @@ __cfi_slowpath_diag(u64 CallSiteTypeId, void *Ptr, void *DiagData) { // We could insert a high-priority constructor into the library, but that would // not help with the uninstrumented libraries. INTERCEPTOR(void*, dlopen, const char *filename, int flag) { + EnsureInterceptorsInitialized(); EnterLoader(); void *handle = REAL(dlopen)(filename, flag); ExitLoader(); @@ -395,12 +402,27 @@ INTERCEPTOR(void*, dlopen, const char *filename, int flag) { } INTERCEPTOR(int, dlclose, void *handle) { + EnsureInterceptorsInitialized(); EnterLoader(); int res = REAL(dlclose)(handle); ExitLoader(); return res; } +static BlockingMutex interceptor_init_lock(LINKER_INITIALIZED); +static bool interceptors_inited = false; + +static void EnsureInterceptorsInitialized() { + BlockingMutexLock lock(&interceptor_init_lock); + if (interceptors_inited) + return; + + INTERCEPT_FUNCTION(dlopen); + INTERCEPT_FUNCTION(dlclose); + + interceptors_inited = true; +} + extern "C" SANITIZER_INTERFACE_ATTRIBUTE #if !SANITIZER_CAN_USE_PREINIT_ARRAY // On ELF platforms, the constructor is invoked using .preinit_array (see below) @@ -411,9 +433,6 @@ void __cfi_init() { InitializeFlags(); InitShadow(); - INTERCEPT_FUNCTION(dlopen); - INTERCEPT_FUNCTION(dlclose); - #ifdef CFI_ENABLE_DIAG __ubsan::InitAsPlugin(); #endif diff --git a/lib/cfi/cfi_blacklist.txt b/lib/cfi/cfi_blacklist.txt index d8c9d49e3cdd..3d73508f5707 100644 --- a/lib/cfi/cfi_blacklist.txt +++ b/lib/cfi/cfi_blacklist.txt @@ -1,7 +1,9 @@ [cfi-unrelated-cast] -# std::get_temporary_buffer, likewise (libstdc++, libc++). +# The specification of std::get_temporary_buffer mandates a cast to +# uninitialized T* (libstdc++, libc++, MSVC stdlib). 
fun:_ZSt20get_temporary_buffer* fun:_ZNSt3__120get_temporary_buffer* +fun:*get_temporary_buffer@.*@std@@* # STL address-of magic (libstdc++, libc++). fun:*__addressof* diff --git a/lib/dfsan/.clang-format b/lib/dfsan/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/dfsan/.clang-format +++ b/lib/dfsan/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/dfsan/CMakeLists.txt b/lib/dfsan/CMakeLists.txt index 2c486bff821b..b3ae713cf02c 100644 --- a/lib/dfsan/CMakeLists.txt +++ b/lib/dfsan/CMakeLists.txt @@ -5,6 +5,12 @@ set(DFSAN_RTL_SOURCES dfsan.cc dfsan_custom.cc dfsan_interceptors.cc) + +set(DFSAN_RTL_HEADERS + dfsan.h + dfsan_flags.inc + dfsan_platform.h) + set(DFSAN_COMMON_CFLAGS ${SANITIZER_COMMON_CFLAGS}) append_rtti_flag(OFF DFSAN_COMMON_CFLAGS) # Prevent clang from generating libc calls. @@ -23,6 +29,7 @@ foreach(arch ${DFSAN_SUPPORTED_ARCH}) $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + ADDITIONAL_HEADERS ${DFSAN_RTL_HEADERS} CFLAGS ${DFSAN_CFLAGS} PARENT_TARGET dfsan) add_sanitizer_rt_symbols(clang_rt.dfsan @@ -32,16 +39,19 @@ foreach(arch ${DFSAN_SUPPORTED_ARCH}) clang_rt.dfsan-${arch}-symbols) endforeach() -set(dfsan_abilist_filename ${COMPILER_RT_OUTPUT_DIR}/dfsan_abilist.txt) +set(dfsan_abilist_dir ${COMPILER_RT_OUTPUT_DIR}/share) +set(dfsan_abilist_filename ${dfsan_abilist_dir}/dfsan_abilist.txt) add_custom_target(dfsan_abilist ALL DEPENDS ${dfsan_abilist_filename}) add_custom_command(OUTPUT ${dfsan_abilist_filename} VERBATIM COMMAND + ${CMAKE_COMMAND} -E make_directory ${dfsan_abilist_dir} + COMMAND cat ${CMAKE_CURRENT_SOURCE_DIR}/done_abilist.txt ${CMAKE_CURRENT_SOURCE_DIR}/libc_ubuntu1404_abilist.txt > ${dfsan_abilist_filename} DEPENDS done_abilist.txt libc_ubuntu1404_abilist.txt) add_dependencies(dfsan dfsan_abilist) install(FILES ${dfsan_abilist_filename} - DESTINATION 
${COMPILER_RT_INSTALL_PATH}) + DESTINATION ${COMPILER_RT_INSTALL_PATH}/share) diff --git a/lib/dfsan/dfsan.cc b/lib/dfsan/dfsan.cc index 9e360c959fa7..d4dbebc43a78 100644 --- a/lib/dfsan/dfsan.cc +++ b/lib/dfsan/dfsan.cc @@ -425,7 +425,8 @@ static void dfsan_init(int argc, char **argv, char **envp) { InitializePlatformEarly(); - MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()); + if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr())) + Die(); // Protect the region of memory we don't use, to preserve the one-to-one // mapping from application to shadow memory. But if ASLR is disabled, Linux diff --git a/lib/dfsan/dfsan_custom.cc b/lib/dfsan/dfsan_custom.cc index e0cd16ab695c..022aa9a9ab8e 100644 --- a/lib/dfsan/dfsan_custom.cc +++ b/lib/dfsan/dfsan_custom.cc @@ -1132,4 +1132,26 @@ int __dfsw_snprintf(char *str, size_t size, const char *format, va_end(ap); return ret; } -} // extern "C" + +// Default empty implementations (weak). Users should redefine them. +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *, + u32 *) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {} + +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp1, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp2, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp4, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp8, void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp1, + void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp2, + void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp4, + void) {} 
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp8, + void) {} +SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_switch, void) {} +} // extern "C" diff --git a/lib/dfsan/done_abilist.txt b/lib/dfsan/done_abilist.txt index a560cd7b477c..9765626ad3ff 100644 --- a/lib/dfsan/done_abilist.txt +++ b/lib/dfsan/done_abilist.txt @@ -264,8 +264,6 @@ fun:reflect.makeFuncStub=discard # lib/Fuzzer ############################################################################### # Replaces __sanitizer_cov_trace_cmp with __dfsw___sanitizer_cov_trace_cmp -fun:__sanitizer_cov_trace_cmp=custom -fun:__sanitizer_cov_trace_cmp=uninstrumented fun:__sanitizer_cov_trace_cmp1=custom fun:__sanitizer_cov_trace_cmp1=uninstrumented fun:__sanitizer_cov_trace_cmp2=custom @@ -274,6 +272,14 @@ fun:__sanitizer_cov_trace_cmp4=custom fun:__sanitizer_cov_trace_cmp4=uninstrumented fun:__sanitizer_cov_trace_cmp8=custom fun:__sanitizer_cov_trace_cmp8=uninstrumented +fun:__sanitizer_cov_trace_const_cmp1=custom +fun:__sanitizer_cov_trace_const_cmp1=uninstrumented +fun:__sanitizer_cov_trace_const_cmp2=custom +fun:__sanitizer_cov_trace_const_cmp2=uninstrumented +fun:__sanitizer_cov_trace_const_cmp4=custom +fun:__sanitizer_cov_trace_const_cmp4=uninstrumented +fun:__sanitizer_cov_trace_const_cmp8=custom +fun:__sanitizer_cov_trace_const_cmp8=uninstrumented # Similar for __sanitizer_cov_trace_switch fun:__sanitizer_cov_trace_switch=custom fun:__sanitizer_cov_trace_switch=uninstrumented @@ -289,10 +295,11 @@ fun:__sanitizer_set_death_callback=uninstrumented fun:__sanitizer_set_death_callback=discard fun:__sanitizer_update_counter_bitset_and_clear_counters=uninstrumented fun:__sanitizer_update_counter_bitset_and_clear_counters=discard +fun:__sanitizer_cov_trace_pc*=uninstrumented +fun:__sanitizer_cov_trace_pc*=discard +fun:__sanitizer_cov_pcs_init=uninstrumented +fun:__sanitizer_cov_pcs_init=discard # Ignores the dfsan wrappers. 
fun:__dfsw_*=uninstrumented fun:__dfsw_*=discard - -# Don't add extra parameters to the Fuzzer callback. -fun:LLVMFuzzerTestOneInput=uninstrumented diff --git a/lib/dfsan/scripts/check_custom_wrappers.sh b/lib/dfsan/scripts/check_custom_wrappers.sh index 99bf50cbbd08..9a80cb9c6ffd 100755 --- a/lib/dfsan/scripts/check_custom_wrappers.sh +++ b/lib/dfsan/scripts/check_custom_wrappers.sh @@ -23,6 +23,7 @@ grep -E "^fun:.*=custom" ${DFSAN_ABI_LIST} \ | grep -v "dfsan_get_label\|__sanitizer_cov_trace" \ | sed "s/^fun:\(.*\)=custom.*/\1/" | sort > $DIFF_A grep -E "__dfsw.*\(" ${DFSAN_CUSTOM_WRAPPERS} \ + | grep -v "__sanitizer_cov_trace" \ | sed "s/.*__dfsw_\(.*\)(.*/\1/" | sort > $DIFF_B diff -u $DIFF_A $DIFF_B > ${DIFFOUT} if [ $? -ne 0 ] @@ -33,6 +34,7 @@ then fi grep -E __dfsw_ ${DFSAN_CUSTOM_WRAPPERS} \ + | grep -v "__sanitizer_cov_trace" \ | sed "s/.*__dfsw_\([^(]*\).*/\1/" | sort > $DIFF_A grep -E "^[[:space:]]*test_.*\(\);" ${DFSAN_CUSTOM_TESTS} \ | sed "s/.*test_\(.*\)();/\1/" | sort > $DIFF_B diff --git a/lib/esan/CMakeLists.txt b/lib/esan/CMakeLists.txt index 2012ab642bf1..4de5c0205c1d 100644 --- a/lib/esan/CMakeLists.txt +++ b/lib/esan/CMakeLists.txt @@ -18,6 +18,18 @@ set(ESAN_SOURCES working_set.cpp working_set_posix.cpp) +set(ESAN_HEADERS + cache_frag.h + esan.h + esan_circular_buffer.h + esan_flags.h + esan_flags.inc + esan_hashtable.h + esan_interface_internal.h + esan_shadow.h + esan_sideline.h + working_set.h) + foreach (arch ${ESAN_SUPPORTED_ARCH}) add_compiler_rt_runtime(clang_rt.esan STATIC @@ -26,6 +38,8 @@ foreach (arch ${ESAN_SUPPORTED_ARCH}) $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}> + ADDITIONAL_HEADERS ${ESAN_HEADERS} CFLAGS ${ESAN_RTL_CFLAGS}) add_sanitizer_rt_symbols(clang_rt.esan ARCHS ${arch} diff --git a/lib/esan/esan.cpp b/lib/esan/esan.cpp index 09b530b6645f..44b8032caa1a 100644 --- 
a/lib/esan/esan.cpp +++ b/lib/esan/esan.cpp @@ -163,15 +163,15 @@ static void initializeShadow() { VPrintf(1, "Shadow #%d: [%zx-%zx) (%zuGB)\n", i, ShadowStart, ShadowEnd, (ShadowEnd - ShadowStart) >> 30); - uptr Map; + uptr Map = 0; if (__esan_which_tool == ESAN_WorkingSet) { // We want to identify all shadow pages that are touched so we start // out inaccessible. Map = (uptr)MmapFixedNoAccess(ShadowStart, ShadowEnd- ShadowStart, "shadow"); } else { - Map = (uptr)MmapFixedNoReserve(ShadowStart, ShadowEnd - ShadowStart, - "shadow"); + if (MmapFixedNoReserve(ShadowStart, ShadowEnd - ShadowStart, "shadow")) + Map = ShadowStart; } if (Map != ShadowStart) { Printf("FATAL: EfficiencySanitizer failed to map its shadow memory.\n"); diff --git a/lib/esan/esan_interceptors.cpp b/lib/esan/esan_interceptors.cpp index 62fa13c83822..0c596f1cf37e 100644 --- a/lib/esan/esan_interceptors.cpp +++ b/lib/esan/esan_interceptors.cpp @@ -175,6 +175,15 @@ DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) do { \ } while (false) +#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \ + off) \ + do { \ + if (!fixMmapAddr(&addr, sz, flags)) \ + return (void *)-1; \ + void *result = REAL(mmap)(addr, sz, prot, flags, fd, off); \ + return (void *)checkMmapResult((uptr)result, sz); \ + } while (false) + #include "sanitizer_common/sanitizer_common_interceptors.inc" //===----------------------------------------------------------------------===// @@ -232,6 +241,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) } while (false) #include "sanitizer_common/sanitizer_common_syscalls.inc" +#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" //===----------------------------------------------------------------------===// // Custom interceptors @@ -306,13 +316,6 @@ INTERCEPTOR(int, unlink, char *path) { return REAL(unlink)(path); } -INTERCEPTOR(int, puts, const char *s) { - void *ctx; - COMMON_INTERCEPTOR_ENTER(ctx, puts, s); - COMMON_INTERCEPTOR_READ_RANGE(ctx, s, 
internal_strlen(s)); - return REAL(puts)(s); -} - INTERCEPTOR(int, rmdir, char *path) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, rmdir, path); @@ -321,44 +324,6 @@ INTERCEPTOR(int, rmdir, char *path) { } //===----------------------------------------------------------------------===// -// Shadow-related interceptors -//===----------------------------------------------------------------------===// - -// These are candidates for sharing with all sanitizers if shadow memory -// support is also standardized. - -INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, - int fd, OFF_T off) { - if (UNLIKELY(REAL(mmap) == nullptr)) { - // With esan init during interceptor init and a static libc preventing - // our early-calloc from triggering, we can end up here before our - // REAL pointer is set up. - return (void *)internal_mmap(addr, sz, prot, flags, fd, off); - } - void *ctx; - COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off); - if (!fixMmapAddr(&addr, sz, flags)) - return (void *)-1; - void *result = REAL(mmap)(addr, sz, prot, flags, fd, off); - return (void *)checkMmapResult((uptr)result, sz); -} - -#if SANITIZER_LINUX -INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, - int fd, OFF64_T off) { - void *ctx; - COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off); - if (!fixMmapAddr(&addr, sz, flags)) - return (void *)-1; - void *result = REAL(mmap64)(addr, sz, prot, flags, fd, off); - return (void *)checkMmapResult((uptr)result, sz); -} -#define ESAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64) -#else -#define ESAN_MAYBE_INTERCEPT_MMAP64 -#endif - -//===----------------------------------------------------------------------===// // Signal-related interceptors //===----------------------------------------------------------------------===// @@ -521,14 +486,8 @@ void initializeInterceptors() { INTERCEPT_FUNCTION(creat); ESAN_MAYBE_INTERCEPT_CREAT64; INTERCEPT_FUNCTION(unlink); - 
INTERCEPT_FUNCTION(fread); - INTERCEPT_FUNCTION(fwrite); - INTERCEPT_FUNCTION(puts); INTERCEPT_FUNCTION(rmdir); - INTERCEPT_FUNCTION(mmap); - ESAN_MAYBE_INTERCEPT_MMAP64; - ESAN_MAYBE_INTERCEPT_SIGNAL; ESAN_MAYBE_INTERCEPT_SIGACTION; ESAN_MAYBE_INTERCEPT_SIGPROCMASK; diff --git a/lib/esan/esan_sideline_linux.cpp b/lib/esan/esan_sideline_linux.cpp index 4a96910ece35..2de25fba700d 100644 --- a/lib/esan/esan_sideline_linux.cpp +++ b/lib/esan/esan_sideline_linux.cpp @@ -70,7 +70,7 @@ int SidelineThread::runSideline(void *Arg) { internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0); // Set up a signal handler on an alternate stack for safety. - InternalScopedBuffer<char> StackMap(SigAltStackSize); + InternalMmapVector<char> StackMap(SigAltStackSize); stack_t SigAltStack; SigAltStack.ss_sp = StackMap.data(); SigAltStack.ss_size = SigAltStackSize; diff --git a/lib/fuzzer/CMakeLists.txt b/lib/fuzzer/CMakeLists.txt index 9769be52ae01..679318e460b5 100644 --- a/lib/fuzzer/CMakeLists.txt +++ b/lib/fuzzer/CMakeLists.txt @@ -1,6 +1,6 @@ set(LIBFUZZER_SOURCES - FuzzerClangCounters.cpp FuzzerCrossOver.cpp + FuzzerDataFlowTrace.cpp FuzzerDriver.cpp FuzzerExtFunctionsDlsym.cpp FuzzerExtFunctionsDlsymWin.cpp @@ -13,6 +13,7 @@ set(LIBFUZZER_SOURCES FuzzerMerge.cpp FuzzerMutate.cpp FuzzerSHA1.cpp + FuzzerShmemFuchsia.cpp FuzzerShmemPosix.cpp FuzzerShmemWindows.cpp FuzzerTracePC.cpp @@ -21,8 +22,29 @@ set(LIBFUZZER_SOURCES FuzzerUtilFuchsia.cpp FuzzerUtilLinux.cpp FuzzerUtilPosix.cpp - FuzzerUtilWindows.cpp - ) + FuzzerUtilWindows.cpp) + +set(LIBFUZZER_HEADERS + FuzzerCommand.h + FuzzerCorpus.h + FuzzerDataFlowTrace.h + FuzzerDefs.h + FuzzerDictionary.h + FuzzerExtFunctions.def + FuzzerExtFunctions.h + FuzzerFlags.def + FuzzerIO.h + FuzzerInterface.h + FuzzerInternal.h + FuzzerMerge.h + FuzzerMutate.h + FuzzerOptions.h + FuzzerRandom.h + FuzzerSHA1.h + FuzzerShmem.h + FuzzerTracePC.h + FuzzerUtil.h + FuzzerValueBitMap.h) CHECK_CXX_SOURCE_COMPILES(" static thread_local int blah; @@ -33,6 
+55,14 @@ CHECK_CXX_SOURCE_COMPILES(" set(LIBFUZZER_CFLAGS ${SANITIZER_COMMON_CFLAGS}) +if(OS_NAME MATCHES "Linux|Fuchsia" AND COMPILER_RT_LIBCXX_PATH) + list(APPEND LIBFUZZER_CFLAGS -nostdinc++ -D_LIBCPP_ABI_VERSION=Fuzzer) + # Remove -stdlib= which is unused when passing -nostdinc++. + string(REGEX REPLACE "-stdlib=[a-zA-Z+]*" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) +elseif(TARGET cxx-headers OR HAVE_LIBCXX) + set(LIBFUZZER_DEPS cxx-headers) +endif() + append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fno-omit-frame-pointer LIBFUZZER_CFLAGS) if (CMAKE_CXX_FLAGS MATCHES "fsanitize-coverage") @@ -43,21 +73,22 @@ if(NOT HAS_THREAD_LOCAL) list(APPEND LIBFUZZER_CFLAGS -Dthread_local=__thread) endif() -if(APPLE) - set(FUZZER_SUPPORTED_OS osx) -endif() +set(FUZZER_SUPPORTED_OS ${SANITIZER_COMMON_SUPPORTED_OS}) add_compiler_rt_object_libraries(RTfuzzer OS ${FUZZER_SUPPORTED_OS} ARCHS ${FUZZER_SUPPORTED_ARCH} SOURCES ${LIBFUZZER_SOURCES} - CFLAGS ${LIBFUZZER_CFLAGS}) + ADDITIONAL_HEADERS ${LIBFUZZER_HEADERS} + CFLAGS ${LIBFUZZER_CFLAGS} + DEPS ${LIBFUZZER_DEPS}) add_compiler_rt_object_libraries(RTfuzzer_main OS ${FUZZER_SUPPORTED_OS} ARCHS ${FUZZER_SUPPORTED_ARCH} SOURCES FuzzerMain.cpp - CFLAGS ${LIBFUZZER_CFLAGS}) + CFLAGS ${LIBFUZZER_CFLAGS} + DEPS ${LIBFUZZER_DEPS}) add_compiler_rt_runtime(clang_rt.fuzzer STATIC @@ -75,6 +106,40 @@ add_compiler_rt_runtime(clang_rt.fuzzer_no_main CFLAGS ${LIBFUZZER_CFLAGS} PARENT_TARGET fuzzer) +if(OS_NAME MATCHES "Linux|Fuchsia" AND COMPILER_RT_LIBCXX_PATH) + macro(partially_link_libcxx name dir arch) + set(cxx_${arch}_merge_dir "${CMAKE_CURRENT_BINARY_DIR}/cxx_${arch}_merge.dir") + file(MAKE_DIRECTORY ${cxx_${arch}_merge_dir}) + add_custom_command(TARGET clang_rt.${name}-${arch} POST_BUILD + COMMAND ${CMAKE_LINKER} --whole-archive "$<TARGET_LINKER_FILE:clang_rt.${name}-${arch}>" --no-whole-archive ${dir}/lib/libc++.a -r -o ${name}.o + COMMAND ${CMAKE_OBJCOPY} --localize-hidden ${name}.o + COMMAND ${CMAKE_COMMAND} -E remove 
"$<TARGET_LINKER_FILE:clang_rt.${name}-${arch}>" + COMMAND ${CMAKE_AR} qcs "$<TARGET_LINKER_FILE:clang_rt.${name}-${arch}>" ${name}.o + WORKING_DIRECTORY ${cxx_${arch}_merge_dir} + ) + endmacro() + + foreach(arch ${FUZZER_SUPPORTED_ARCH}) + get_target_flags_for_arch(${arch} TARGET_CFLAGS) + set(LIBCXX_${arch}_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/libcxx_fuzzer_${arch}) + add_custom_libcxx(libcxx_fuzzer_${arch} ${LIBCXX_${arch}_PREFIX} + CFLAGS ${TARGET_CFLAGS} + -D_LIBCPP_ABI_VERSION=Fuzzer + -D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS=1 + -fvisibility=hidden + CMAKE_ARGS -DCMAKE_CXX_COMPILER_WORKS=ON + -DLIBCXX_ENABLE_EXCEPTIONS=OFF + -DLIBCXX_ENABLE_SHARED=OFF + -DLIBCXX_CXX_ABI=none) + target_compile_options(RTfuzzer.${arch} PRIVATE -isystem ${LIBCXX_${arch}_PREFIX}/include/c++/v1) + add_dependencies(RTfuzzer.${arch} libcxx_fuzzer_${arch}-build) + target_compile_options(RTfuzzer_main.${arch} PRIVATE -isystem ${LIBCXX_${arch}_PREFIX}/include/c++/v1) + add_dependencies(RTfuzzer_main.${arch} libcxx_fuzzer_${arch}-build) + partially_link_libcxx(fuzzer_no_main ${LIBCXX_${arch}_PREFIX} ${arch}) + partially_link_libcxx(fuzzer ${LIBCXX_${arch}_PREFIX} ${arch}) + endforeach() +endif() + if(COMPILER_RT_INCLUDE_TESTS) add_subdirectory(tests) endif() diff --git a/lib/fuzzer/FuzzerClangCounters.cpp b/lib/fuzzer/FuzzerClangCounters.cpp deleted file mode 100644 index f69e922cf004..000000000000 --- a/lib/fuzzer/FuzzerClangCounters.cpp +++ /dev/null @@ -1,49 +0,0 @@ -//===- FuzzerExtraCounters.cpp - Extra coverage counters ------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// Coverage counters from Clang's SourceBasedCodeCoverage. 
-//===----------------------------------------------------------------------===// - -// Support for SourceBasedCodeCoverage is experimental: -// * Works only for the main binary, not DSOs yet. -// * Works only on Linux. -// * Does not implement print_pcs/print_coverage yet. -// * Is not fully evaluated for performance and sensitivity. -// We expect large performance drop due to 64-bit counters, -// and *maybe* better sensitivity due to more fine-grained counters. -// Preliminary comparison on a single benchmark (RE2) shows -// a bit worse sensitivity though. - -#include "FuzzerDefs.h" - -#if LIBFUZZER_LINUX -__attribute__((weak)) extern uint64_t __start___llvm_prf_cnts; -__attribute__((weak)) extern uint64_t __stop___llvm_prf_cnts; -namespace fuzzer { -uint64_t *ClangCountersBegin() { return &__start___llvm_prf_cnts; } -uint64_t *ClangCountersEnd() { return &__stop___llvm_prf_cnts; } -} // namespace fuzzer -#else -// TODO: Implement on Mac (if the data shows it's worth it). -//__attribute__((visibility("hidden"))) -//extern uint64_t CountersStart __asm("section$start$__DATA$__llvm_prf_cnts"); -//__attribute__((visibility("hidden"))) -//extern uint64_t CountersEnd __asm("section$end$__DATA$__llvm_prf_cnts"); -namespace fuzzer { -uint64_t *ClangCountersBegin() { return nullptr; } -uint64_t *ClangCountersEnd() { return nullptr; } -} // namespace fuzzer -#endif - -namespace fuzzer { -ATTRIBUTE_NO_SANITIZE_ALL -void ClearClangCounters() { // hand-written memset, don't asan-ify. - for (auto P = ClangCountersBegin(); P < ClangCountersEnd(); P++) - *P = 0; -} -} diff --git a/lib/fuzzer/FuzzerCommand.h b/lib/fuzzer/FuzzerCommand.h index c5500ed21fc1..255f571ecf31 100644 --- a/lib/fuzzer/FuzzerCommand.h +++ b/lib/fuzzer/FuzzerCommand.h @@ -29,8 +29,7 @@ public: // is immutable, meaning this flag effectively marks the end of the mutable // argument list. 
static inline const char *ignoreRemainingArgs() { - static const char *kIgnoreRemaining = "-ignore_remaining_args=1"; - return kIgnoreRemaining; + return "-ignore_remaining_args=1"; } Command() : CombinedOutAndErr(false) {} diff --git a/lib/fuzzer/FuzzerCorpus.h b/lib/fuzzer/FuzzerCorpus.h index 2da929835f45..8ad14656cffc 100644 --- a/lib/fuzzer/FuzzerCorpus.h +++ b/lib/fuzzer/FuzzerCorpus.h @@ -12,6 +12,7 @@ #ifndef LLVM_FUZZER_CORPUS #define LLVM_FUZZER_CORPUS +#include "FuzzerDataFlowTrace.h" #include "FuzzerDefs.h" #include "FuzzerIO.h" #include "FuzzerRandom.h" @@ -35,8 +36,9 @@ struct InputInfo { size_t NumSuccessfullMutations = 0; bool MayDeleteFile = false; bool Reduced = false; + bool HasFocusFunction = false; Vector<uint32_t> UniqFeatureSet; - float FeatureFrequencyScore = 1.0; + Vector<uint8_t> DataFlowTraceForFocusFunction; }; class InputCorpus { @@ -45,7 +47,6 @@ class InputCorpus { InputCorpus(const std::string &OutputCorpus) : OutputCorpus(OutputCorpus) { memset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature)); memset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature)); - memset(FeatureFrequency, 0, sizeof(FeatureFrequency)); } ~InputCorpus() { for (auto II : Inputs) @@ -70,10 +71,24 @@ class InputCorpus { Res = std::max(Res, II->U.size()); return Res; } + + size_t NumInputsThatTouchFocusFunction() { + return std::count_if(Inputs.begin(), Inputs.end(), [](const InputInfo *II) { + return II->HasFocusFunction; + }); + } + + size_t NumInputsWithDataFlowTrace() { + return std::count_if(Inputs.begin(), Inputs.end(), [](const InputInfo *II) { + return !II->DataFlowTraceForFocusFunction.empty(); + }); + } + bool empty() const { return Inputs.empty(); } const Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; } void AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile, - const Vector<uint32_t> &FeatureSet) { + bool HasFocusFunction, const Vector<uint32_t> &FeatureSet, + const DataFlowTrace &DFT, const InputInfo 
*BaseII) { assert(!U.empty()); if (FeatureDebug) Printf("ADD_TO_CORPUS %zd NF %zd\n", Inputs.size(), NumFeatures); @@ -83,9 +98,19 @@ class InputCorpus { II.NumFeatures = NumFeatures; II.MayDeleteFile = MayDeleteFile; II.UniqFeatureSet = FeatureSet; + II.HasFocusFunction = HasFocusFunction; std::sort(II.UniqFeatureSet.begin(), II.UniqFeatureSet.end()); ComputeSHA1(U.data(), U.size(), II.Sha1); - Hashes.insert(Sha1ToString(II.Sha1)); + auto Sha1Str = Sha1ToString(II.Sha1); + Hashes.insert(Sha1Str); + if (HasFocusFunction) + if (auto V = DFT.Get(Sha1Str)) + II.DataFlowTraceForFocusFunction = *V; + // This is a gross heuristic. + // Ideally, when we add an element to a corpus we need to know its DFT. + // But if we don't, we'll use the DFT of its base input. + if (II.DataFlowTraceForFocusFunction.empty() && BaseII) + II.DataFlowTraceForFocusFunction = BaseII->DataFlowTraceForFocusFunction; UpdateCorpusDistribution(); PrintCorpus(); // ValidateFeatureSet(); @@ -157,9 +182,9 @@ class InputCorpus { void PrintStats() { for (size_t i = 0; i < Inputs.size(); i++) { const auto &II = *Inputs[i]; - Printf(" [%zd %s]\tsz: %zd\truns: %zd\tsucc: %zd\n", i, + Printf(" [% 3zd %s] sz: % 5zd runs: % 5zd succ: % 5zd focus: %d\n", i, Sha1ToString(II.Sha1).c_str(), II.U.size(), - II.NumExecutedMutations, II.NumSuccessfullMutations); + II.NumExecutedMutations, II.NumSuccessfullMutations, II.HasFocusFunction); } } @@ -213,18 +238,10 @@ class InputCorpus { return false; } - void UpdateFeatureFrequency(size_t Idx) { - FeatureFrequency[Idx % kFeatureSetSize]++; - } - float GetFeatureFrequency(size_t Idx) const { - return FeatureFrequency[Idx % kFeatureSetSize]; - } - void UpdateFeatureFrequencyScore(InputInfo *II) { - const float kMin = 0.01, kMax = 100.; - II->FeatureFrequencyScore = kMin; - for (auto Idx : II->UniqFeatureSet) - II->FeatureFrequencyScore += 1. 
/ (GetFeatureFrequency(Idx) + 1.); - II->FeatureFrequencyScore = Min(II->FeatureFrequencyScore, kMax); + bool IsFeatureNew(size_t Idx, uint32_t NewSize, bool Shrink) { + assert(NewSize); + uint32_t OldSize = GetFeature(Idx % kFeatureSetSize); + return OldSize == 0 || (Shrink && OldSize > NewSize); } size_t NumFeatures() const { return NumAddedFeatures; } @@ -264,14 +281,11 @@ private: std::iota(Intervals.begin(), Intervals.end(), 0); for (size_t i = 0; i < N; i++) Weights[i] = Inputs[i]->NumFeatures - ? (i + 1) * Inputs[i]->FeatureFrequencyScore + ? (i + 1) * (Inputs[i]->HasFocusFunction ? 1000 : 1) : 0.; if (FeatureDebug) { for (size_t i = 0; i < N; i++) Printf("%zd ", Inputs[i]->NumFeatures); - Printf("NUM\n"); - for (size_t i = 0; i < N; i++) - Printf("%f ", Inputs[i]->FeatureFrequencyScore); Printf("SCORE\n"); for (size_t i = 0; i < N; i++) Printf("%f ", Weights[i]); @@ -292,7 +306,6 @@ private: size_t NumUpdatedFeatures = 0; uint32_t InputSizesPerFeature[kFeatureSetSize]; uint32_t SmallestElementPerFeature[kFeatureSetSize]; - float FeatureFrequency[kFeatureSetSize]; std::string OutputCorpus; }; diff --git a/lib/fuzzer/FuzzerDataFlowTrace.cpp b/lib/fuzzer/FuzzerDataFlowTrace.cpp new file mode 100644 index 000000000000..764f3e49fd2d --- /dev/null +++ b/lib/fuzzer/FuzzerDataFlowTrace.cpp @@ -0,0 +1,91 @@ +//===- FuzzerDataFlowTrace.cpp - DataFlowTrace ---*- C++ -* ===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// fuzzer::DataFlowTrace +//===----------------------------------------------------------------------===// + +#include "FuzzerDataFlowTrace.h" +#include "FuzzerIO.h" + +#include <cstdlib> +#include <fstream> +#include <string> +#include <vector> + +namespace fuzzer { + +void DataFlowTrace::Init(const std::string &DirPath, + const std::string &FocusFunction) { + if (DirPath.empty()) return; + const char *kFunctionsTxt = "functions.txt"; + Printf("INFO: DataFlowTrace: reading from '%s'\n", DirPath.c_str()); + Vector<SizedFile> Files; + GetSizedFilesFromDir(DirPath, &Files); + std::string L; + + // Read functions.txt + std::ifstream IF(DirPlusFile(DirPath, kFunctionsTxt)); + size_t FocusFuncIdx = SIZE_MAX; + size_t NumFunctions = 0; + while (std::getline(IF, L, '\n')) { + NumFunctions++; + if (FocusFunction == L) + FocusFuncIdx = NumFunctions - 1; + } + if (!NumFunctions || FocusFuncIdx == SIZE_MAX || Files.size() <= 1) + return; + // Read traces. 
+ size_t NumTraceFiles = 0; + size_t NumTracesWithFocusFunction = 0; + for (auto &SF : Files) { + auto Name = Basename(SF.File); + if (Name == kFunctionsTxt) continue; + auto ParseError = [&](const char *Err) { + Printf("DataFlowTrace: parse error: %s\n File: %s\n Line: %s\n", Err, + Name.c_str(), L.c_str()); + }; + NumTraceFiles++; + // Printf("=== %s\n", Name.c_str()); + std::ifstream IF(SF.File); + while (std::getline(IF, L, '\n')) { + size_t SpacePos = L.find(' '); + if (SpacePos == std::string::npos) + return ParseError("no space in the trace line"); + if (L.empty() || L[0] != 'F') + return ParseError("the trace line doesn't start with 'F'"); + size_t N = std::atol(L.c_str() + 1); + if (N >= NumFunctions) + return ParseError("N is greater than the number of functions"); + if (N == FocusFuncIdx) { + NumTracesWithFocusFunction++; + const char *Beg = L.c_str() + SpacePos + 1; + const char *End = L.c_str() + L.size(); + assert(Beg < End); + size_t Len = End - Beg; + Vector<uint8_t> V(Len); + for (size_t I = 0; I < Len; I++) { + if (Beg[I] != '0' && Beg[I] != '1') + ParseError("the trace should contain only 0 or 1"); + V[I] = Beg[I] == '1'; + } + Traces[Name] = V; + // Print just a few small traces. + if (NumTracesWithFocusFunction <= 3 && Len <= 16) + Printf("%s => |%s|\n", Name.c_str(), L.c_str() + SpacePos + 1); + break; // No need to parse the following lines. 
+ } + } + } + assert(NumTraceFiles == Files.size() - 1); + Printf("INFO: DataFlowTrace: %zd trace files, %zd functions, " + "%zd traces with focus function\n", + NumTraceFiles, NumFunctions, NumTracesWithFocusFunction); +} + +} // namespace fuzzer + diff --git a/lib/fuzzer/FuzzerDataFlowTrace.h b/lib/fuzzer/FuzzerDataFlowTrace.h new file mode 100644 index 000000000000..ad4faeab7b2f --- /dev/null +++ b/lib/fuzzer/FuzzerDataFlowTrace.h @@ -0,0 +1,56 @@ +//===- FuzzerDataFlowTrace.h - Internal header for the Fuzzer ---*- C++ -* ===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// fuzzer::DataFlowTrace; reads and handles a data-flow trace. +// +// A data flow trace is generated by e.g. dataflow/DataFlow.cpp +// and is stored on disk in a separate directory. +// +// The trace dir contains a file 'functions.txt' which lists function names, +// oner per line, e.g. +// ==> functions.txt <== +// Func2 +// LLVMFuzzerTestOneInput +// Func1 +// +// All other files in the dir are the traces, see dataflow/DataFlow.cpp. +// The name of the file is sha1 of the input used to generate the trace. +// +// Current status: +// the data is parsed and the summary is printed, but the data is not yet +// used in any other way. 
+//===----------------------------------------------------------------------===// + +#ifndef LLVM_FUZZER_DATA_FLOW_TRACE +#define LLVM_FUZZER_DATA_FLOW_TRACE + +#include "FuzzerDefs.h" + +#include <unordered_map> +#include <vector> +#include <string> + +namespace fuzzer { +class DataFlowTrace { + public: + void Init(const std::string &DirPath, const std::string &FocusFunction); + void Clear() { Traces.clear(); } + const Vector<uint8_t> *Get(const std::string &InputSha1) const { + auto It = Traces.find(InputSha1); + if (It != Traces.end()) + return &It->second; + return nullptr; + } + + private: + // Input's sha1 => DFT for the FocusFunction. + std::unordered_map<std::string, Vector<uint8_t> > Traces; +}; +} // namespace fuzzer + +#endif // LLVM_FUZZER_DATA_FLOW_TRACE diff --git a/lib/fuzzer/FuzzerDefs.h b/lib/fuzzer/FuzzerDefs.h index 5942efc47a4a..a35c7a181b74 100644 --- a/lib/fuzzer/FuzzerDefs.h +++ b/lib/fuzzer/FuzzerDefs.h @@ -27,30 +27,56 @@ #define LIBFUZZER_FUCHSIA 0 #define LIBFUZZER_LINUX 1 #define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 #define LIBFUZZER_WINDOWS 0 #elif __APPLE__ #define LIBFUZZER_APPLE 1 #define LIBFUZZER_FUCHSIA 0 #define LIBFUZZER_LINUX 0 #define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 #define LIBFUZZER_WINDOWS 0 #elif __NetBSD__ #define LIBFUZZER_APPLE 0 #define LIBFUZZER_FUCHSIA 0 #define LIBFUZZER_LINUX 0 #define LIBFUZZER_NETBSD 1 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#elif __FreeBSD__ +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 1 +#define LIBFUZZER_OPENBSD 0 +#define LIBFUZZER_WINDOWS 0 +#elif __OpenBSD__ +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_FUCHSIA 0 +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 1 #define LIBFUZZER_WINDOWS 0 #elif _WIN32 
#define LIBFUZZER_APPLE 0 #define LIBFUZZER_FUCHSIA 0 #define LIBFUZZER_LINUX 0 #define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 #define LIBFUZZER_WINDOWS 1 #elif __Fuchsia__ #define LIBFUZZER_APPLE 0 #define LIBFUZZER_FUCHSIA 1 #define LIBFUZZER_LINUX 0 #define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 #define LIBFUZZER_WINDOWS 0 #else #error "Support for your platform has not been implemented" @@ -60,7 +86,9 @@ # define __has_attribute(x) 0 #endif -#define LIBFUZZER_POSIX (LIBFUZZER_APPLE || LIBFUZZER_LINUX || LIBFUZZER_NETBSD) +#define LIBFUZZER_POSIX \ + (LIBFUZZER_APPLE || LIBFUZZER_LINUX || LIBFUZZER_NETBSD || \ + LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD) #ifdef __x86_64 # if __has_attribute(target) @@ -127,6 +155,11 @@ extern ExternalFunctions *EF; template<typename T> class fuzzer_allocator: public std::allocator<T> { public: + fuzzer_allocator() = default; + + template<class U> + fuzzer_allocator(const fuzzer_allocator<U>&) {} + template<class Other> struct rebind { typedef fuzzer_allocator<Other> other; }; }; @@ -143,12 +176,6 @@ typedef int (*UserCallback)(const uint8_t *Data, size_t Size); int FuzzerDriver(int *argc, char ***argv, UserCallback Callback); -struct ScopedDoingMyOwnMemOrStr { - ScopedDoingMyOwnMemOrStr() { DoingMyOwnMemOrStr++; } - ~ScopedDoingMyOwnMemOrStr() { DoingMyOwnMemOrStr--; } - static int DoingMyOwnMemOrStr; -}; - inline uint8_t Bswap(uint8_t x) { return x; } inline uint16_t Bswap(uint16_t x) { return __builtin_bswap16(x); } inline uint32_t Bswap(uint32_t x) { return __builtin_bswap32(x); } @@ -158,9 +185,7 @@ uint8_t *ExtraCountersBegin(); uint8_t *ExtraCountersEnd(); void ClearExtraCounters(); -uint64_t *ClangCountersBegin(); -uint64_t *ClangCountersEnd(); -void ClearClangCounters(); +extern bool RunningUserCallback; } // namespace fuzzer diff --git a/lib/fuzzer/FuzzerDictionary.h b/lib/fuzzer/FuzzerDictionary.h index daf7d003ea91..0d9d91bcd2f1 100644 --- 
a/lib/fuzzer/FuzzerDictionary.h +++ b/lib/fuzzer/FuzzerDictionary.h @@ -33,17 +33,9 @@ public: } bool operator==(const FixedWord<kMaxSize> &w) const { - ScopedDoingMyOwnMemOrStr scoped_doing_my_own_mem_os_str; return Size == w.Size && 0 == memcmp(Data, w.Data, Size); } - bool operator<(const FixedWord<kMaxSize> &w) const { - ScopedDoingMyOwnMemOrStr scoped_doing_my_own_mem_os_str; - if (Size != w.Size) - return Size < w.Size; - return memcmp(Data, w.Data, Size) < 0; - } - static size_t GetMaxSize() { return kMaxSize; } const uint8_t *data() const { return Data; } uint8_t size() const { return Size; } @@ -115,11 +107,11 @@ private: }; // Parses one dictionary entry. -// If successfull, write the enty to Unit and returns true, +// If successful, write the enty to Unit and returns true, // otherwise returns false. bool ParseOneDictionaryEntry(const std::string &Str, Unit *U); // Parses the dictionary file, fills Units, returns true iff all lines -// were parsed succesfully. +// were parsed successfully. 
bool ParseDictionaryFile(const std::string &Text, Vector<Unit> *Units); } // namespace fuzzer diff --git a/lib/fuzzer/FuzzerDriver.cpp b/lib/fuzzer/FuzzerDriver.cpp index f6b642daeda7..783474a39e16 100644 --- a/lib/fuzzer/FuzzerDriver.cpp +++ b/lib/fuzzer/FuzzerDriver.cpp @@ -537,6 +537,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { EF = new ExternalFunctions(); if (EF->LLVMFuzzerInitialize) EF->LLVMFuzzerInitialize(argc, argv); + if (EF->__msan_scoped_disable_interceptor_checks) + EF->__msan_scoped_disable_interceptor_checks(); const Vector<std::string> Args(*argv, *argv + *argc); assert(!Args.empty()); ProgName = new std::string(Args[0]); @@ -567,7 +569,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { FuzzingOptions Options; Options.Verbosity = Flags.verbosity; Options.MaxLen = Flags.max_len; - Options.ExperimentalLenControl = Flags.experimental_len_control; + Options.LenControl = Flags.len_control; Options.UnitTimeoutSec = Flags.timeout; Options.ErrorExitCode = Flags.error_exitcode; Options.TimeoutExitCode = Flags.timeout_exitcode; @@ -613,15 +615,22 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { Options.PrintNewCovPcs = Flags.print_pcs; Options.PrintNewCovFuncs = Flags.print_funcs; Options.PrintFinalStats = Flags.print_final_stats; + Options.PrintMutationStats = Flags.print_mutation_stats; Options.PrintCorpusStats = Flags.print_corpus_stats; Options.PrintCoverage = Flags.print_coverage; + Options.PrintUnstableStats = Flags.print_unstable_stats; + if (Flags.handle_unstable == TracePC::MinUnstable || + Flags.handle_unstable == TracePC::ZeroUnstable) + Options.HandleUnstable = Flags.handle_unstable; Options.DumpCoverage = Flags.dump_coverage; - Options.UseClangCoverage = Flags.use_clang_coverage; - Options.UseFeatureFrequency = Flags.use_feature_frequency; if (Flags.exit_on_src_pos) Options.ExitOnSrcPos = Flags.exit_on_src_pos; if (Flags.exit_on_item) Options.ExitOnItem = Flags.exit_on_item; + 
if (Flags.focus_function) + Options.FocusFunction = Flags.focus_function; + if (Flags.data_flow_trace) + Options.DataFlowTrace = Flags.data_flow_trace; unsigned Seed = Flags.seed; // Initialize Seed. @@ -665,6 +674,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { if (Flags.cleanse_crash) return CleanseCrashInput(Args, Options); +#if 0 // deprecated, to be removed. if (auto Name = Flags.run_equivalence_server) { SMR.Destroy(Name); if (!SMR.Create(Name)) { @@ -690,6 +700,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { } Printf("INFO: EQUIVALENCE CLIENT UP\n"); } +#endif if (DoPlainRun) { Options.SaveArtifacts = false; @@ -747,7 +758,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) { Printf("Dictionary analysis failed\n"); exit(1); } - Printf("Dictionary analysis suceeded\n"); + Printf("Dictionary analysis succeeded\n"); exit(0); } diff --git a/lib/fuzzer/FuzzerExtFunctions.def b/lib/fuzzer/FuzzerExtFunctions.def index 25a655bfd71d..8bfffdde56d4 100644 --- a/lib/fuzzer/FuzzerExtFunctions.def +++ b/lib/fuzzer/FuzzerExtFunctions.def @@ -29,6 +29,7 @@ EXT_FUNC(LLVMFuzzerCustomCrossOver, size_t, EXT_FUNC(__lsan_enable, void, (), false); EXT_FUNC(__lsan_disable, void, (), false); EXT_FUNC(__lsan_do_recoverable_leak_check, int, (), false); +EXT_FUNC(__sanitizer_acquire_crash_state, bool, (), true); EXT_FUNC(__sanitizer_install_malloc_and_free_hooks, int, (void (*malloc_hook)(const volatile void *, size_t), void (*free_hook)(const volatile void *)), @@ -45,3 +46,6 @@ EXT_FUNC(__sanitizer_set_death_callback, void, (void (*)(void)), true); EXT_FUNC(__sanitizer_set_report_fd, void, (void*), false); EXT_FUNC(__sanitizer_dump_coverage, void, (const uintptr_t *, uintptr_t), false); +EXT_FUNC(__msan_scoped_disable_interceptor_checks, void, (), false); +EXT_FUNC(__msan_scoped_enable_interceptor_checks, void, (), false); +EXT_FUNC(__msan_unpoison, void, (const volatile void *, size_t size), false); diff --git 
a/lib/fuzzer/FuzzerExtFunctionsWeak.cpp b/lib/fuzzer/FuzzerExtFunctionsWeak.cpp index 5a90723986af..a4e56fc27b8d 100644 --- a/lib/fuzzer/FuzzerExtFunctionsWeak.cpp +++ b/lib/fuzzer/FuzzerExtFunctionsWeak.cpp @@ -13,7 +13,8 @@ // to clients right now. //===----------------------------------------------------------------------===// #include "FuzzerDefs.h" -#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FUCHSIA +#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FUCHSIA || \ + LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD #include "FuzzerExtFunctions.h" #include "FuzzerIO.h" @@ -51,4 +52,4 @@ ExternalFunctions::ExternalFunctions() { } // namespace fuzzer -#endif // LIBFUZZER_LINUX || LIBFUZZER_NETBSD +#endif diff --git a/lib/fuzzer/FuzzerExtraCounters.cpp b/lib/fuzzer/FuzzerExtraCounters.cpp index 0e7a7761bf80..c99cd89be293 100644 --- a/lib/fuzzer/FuzzerExtraCounters.cpp +++ b/lib/fuzzer/FuzzerExtraCounters.cpp @@ -11,7 +11,8 @@ #include "FuzzerDefs.h" -#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD +#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \ + LIBFUZZER_OPENBSD __attribute__((weak)) extern uint8_t __start___libfuzzer_extra_counters; __attribute__((weak)) extern uint8_t __stop___libfuzzer_extra_counters; diff --git a/lib/fuzzer/FuzzerFlags.def b/lib/fuzzer/FuzzerFlags.def index a32102a7da07..ba04bc25fd45 100644 --- a/lib/fuzzer/FuzzerFlags.def +++ b/lib/fuzzer/FuzzerFlags.def @@ -17,7 +17,10 @@ FUZZER_FLAG_INT(runs, -1, FUZZER_FLAG_INT(max_len, 0, "Maximum length of the test input. " "If 0, libFuzzer tries to guess a good value based on the corpus " "and reports it. ") -FUZZER_FLAG_INT(experimental_len_control, 0, "experimental flag") +FUZZER_FLAG_INT(len_control, 1000, "Try generating small inputs first, " + "then try larger inputs over time. Specifies the rate at which the length " + "limit is increased (smaller == faster). 
If 0, immediately try inputs with " + "size up to max_len.") FUZZER_FLAG_INT(cross_over, 1, "If 1, cross over inputs.") FUZZER_FLAG_INT(mutate_depth, 5, "Apply this number of consecutive mutations to each input.") @@ -42,7 +45,7 @@ FUZZER_FLAG_INT(merge, 0, "If 1, the 2-nd, 3-rd, etc corpora will be " "This flag can be used to minimize a corpus.") FUZZER_FLAG_STRING(merge_inner, "internal flag") FUZZER_FLAG_STRING(merge_control_file, - "Specify a control file used for the merge proccess. " + "Specify a control file used for the merge process. " "If a merge process gets killed it tries to leave this file " "in a state suitable for resuming the merge. " "By default a temporary file will be used.") @@ -107,6 +110,15 @@ FUZZER_FLAG_INT(print_coverage, 0, "If 1, print coverage information as text" FUZZER_FLAG_INT(dump_coverage, 0, "Deprecated." " If 1, dump coverage information as a" " .sancov file at exit.") +FUZZER_FLAG_INT(handle_unstable, 0, "Experimental." + " Executes every input 3 times in total if a unique feature" + " is found during the first execution." + " If 1, we only use the minimum hit count from the 3 runs" + " to determine whether an input is interesting." + " If 2, we disregard edges that are found unstable for" + " feature collection.") +FUZZER_FLAG_INT(print_unstable_stats, 0, "Experimental." + " If 1, print unstable statistics at exit.") FUZZER_FLAG_INT(handle_segv, 1, "If 1, try to intercept SIGSEGV.") FUZZER_FLAG_INT(handle_bus, 1, "If 1, try to intercept SIGBUS.") FUZZER_FLAG_INT(handle_abrt, 1, "If 1, try to intercept SIGABRT.") @@ -142,9 +154,12 @@ FUZZER_FLAG_STRING(exit_on_item, "Exit if an item with a given sha1 sum" FUZZER_FLAG_INT(ignore_remaining_args, 0, "If 1, ignore all arguments passed " "after this one. Useful for fuzzers that need to do their own " "argument parsing.") +FUZZER_FLAG_STRING(focus_function, "Experimental. 
" + "Fuzzing will focus on inputs that trigger calls to this function") -FUZZER_FLAG_STRING(run_equivalence_server, "Experimental") -FUZZER_FLAG_STRING(use_equivalence_server, "Experimental") +FUZZER_DEPRECATED_FLAG(run_equivalence_server) +FUZZER_DEPRECATED_FLAG(use_equivalence_server) FUZZER_FLAG_INT(analyze_dict, 0, "Experimental") -FUZZER_FLAG_INT(use_clang_coverage, 0, "Experimental") -FUZZER_FLAG_INT(use_feature_frequency, 0, "Experimental/internal") +FUZZER_DEPRECATED_FLAG(use_clang_coverage) +FUZZER_FLAG_STRING(data_flow_trace, "Experimental: use the data flow trace") +FUZZER_FLAG_INT(print_mutation_stats, 0, "Experimental") diff --git a/lib/fuzzer/FuzzerIO.cpp b/lib/fuzzer/FuzzerIO.cpp index dac5ec658f1c..f3ead0ec5357 100644 --- a/lib/fuzzer/FuzzerIO.cpp +++ b/lib/fuzzer/FuzzerIO.cpp @@ -100,6 +100,14 @@ std::string DirPlusFile(const std::string &DirPath, return DirPath + GetSeparator() + FileName; } +std::string Basename(const std::string &Path, char Separator) { + size_t Pos = Path.rfind(Separator); + if (Pos == std::string::npos) + return Path; + assert(Pos < Path.size()); + return Path.substr(Pos + 1); +} + void DupAndCloseStderr() { int OutputFd = DuplicateFile(2); if (OutputFd > 0) { diff --git a/lib/fuzzer/FuzzerIO.h b/lib/fuzzer/FuzzerIO.h index ea9f0d5a6703..6d7757435b7b 100644 --- a/lib/fuzzer/FuzzerIO.h +++ b/lib/fuzzer/FuzzerIO.h @@ -67,6 +67,8 @@ struct SizedFile { void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V); char GetSeparator(); +// Similar to the basename utility: returns the file name w/o the dir prefix. 
+std::string Basename(const std::string &Path, char Separator = GetSeparator()); FILE* OpenFile(int Fd, const char *Mode); diff --git a/lib/fuzzer/FuzzerIOPosix.cpp b/lib/fuzzer/FuzzerIOPosix.cpp index 2257751c662d..17e884d3c4c3 100644 --- a/lib/fuzzer/FuzzerIOPosix.cpp +++ b/lib/fuzzer/FuzzerIOPosix.cpp @@ -54,7 +54,7 @@ void ListFilesInDirRecursive(const std::string &Dir, long *Epoch, DIR *D = opendir(Dir.c_str()); if (!D) { - Printf("No such directory: %s; exiting\n", Dir.c_str()); + Printf("%s: %s; exiting\n", strerror(errno), Dir.c_str()); exit(1); } while (auto E = readdir(D)) { diff --git a/lib/fuzzer/FuzzerInterface.h b/lib/fuzzer/FuzzerInterface.h index c2c0a39843c0..0f7effb2ab6a 100644 --- a/lib/fuzzer/FuzzerInterface.h +++ b/lib/fuzzer/FuzzerInterface.h @@ -30,35 +30,39 @@ extern "C" { // Executes the code under test with [Data, Data+Size) as the input. // libFuzzer will invoke this function *many* times with different inputs. // Must return 0. -int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); +__attribute__((visibility("default"))) int +LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); // Optional user-provided initialization function. // If provided, this function will be called by libFuzzer once at startup. // It may read and modify argc/argv. // Must return 0. -int LLVMFuzzerInitialize(int *argc, char ***argv); +__attribute__((visibility("default"))) int LLVMFuzzerInitialize(int *argc, + char ***argv); // Optional user-provided custom mutator. // Mutates raw data in [Data, Data+Size) inplace. // Returns the new size, which is not greater than MaxSize. // Given the same Seed produces the same mutation. -size_t LLVMFuzzerCustomMutator(uint8_t *Data, size_t Size, size_t MaxSize, - unsigned int Seed); +__attribute__((visibility("default"))) size_t +LLVMFuzzerCustomMutator(uint8_t *Data, size_t Size, size_t MaxSize, + unsigned int Seed); // Optional user-provided custom cross-over function. 
// Combines pieces of Data1 & Data2 together into Out. // Returns the new size, which is not greater than MaxOutSize. // Should produce the same mutation given the same Seed. -size_t LLVMFuzzerCustomCrossOver(const uint8_t *Data1, size_t Size1, - const uint8_t *Data2, size_t Size2, - uint8_t *Out, size_t MaxOutSize, - unsigned int Seed); +__attribute__((visibility("default"))) size_t +LLVMFuzzerCustomCrossOver(const uint8_t *Data1, size_t Size1, + const uint8_t *Data2, size_t Size2, uint8_t *Out, + size_t MaxOutSize, unsigned int Seed); // Experimental, may go away in future. // libFuzzer-provided function to be used inside LLVMFuzzerCustomMutator. // Mutates raw data in [Data, Data+Size) inplace. // Returns the new size, which is not greater than MaxSize. -size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize); +__attribute__((visibility("default"))) size_t +LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize); #ifdef __cplusplus } // extern "C" diff --git a/lib/fuzzer/FuzzerInternal.h b/lib/fuzzer/FuzzerInternal.h index 2b2638f1f8f2..bfc898248adb 100644 --- a/lib/fuzzer/FuzzerInternal.h +++ b/lib/fuzzer/FuzzerInternal.h @@ -12,6 +12,7 @@ #ifndef LLVM_FUZZER_INTERNAL_H #define LLVM_FUZZER_INTERNAL_H +#include "FuzzerDataFlowTrace.h" #include "FuzzerDefs.h" #include "FuzzerExtFunctions.h" #include "FuzzerInterface.h" @@ -66,6 +67,7 @@ public: static void StaticGracefulExitCallback(); void ExecuteCallback(const uint8_t *Data, size_t Size); + void CheckForUnstableCounters(const uint8_t *Data, size_t Size); bool RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile = false, InputInfo *II = nullptr, bool *FoundUniqFeatures = nullptr); @@ -116,7 +118,6 @@ private: uint8_t *CurrentUnitData = nullptr; std::atomic<size_t> CurrentUnitSize; uint8_t BaseSha1[kSHA1NumBytes]; // Checksum of the base unit. 
- bool RunningCB = false; bool GracefulExitRequested = false; @@ -134,6 +135,7 @@ private: InputCorpus &Corpus; MutationDispatcher &MD; FuzzingOptions Options; + DataFlowTrace DFT; system_clock::time_point ProcessStartTime = system_clock::now(); system_clock::time_point UnitStartTime, UnitStopTime; @@ -150,6 +152,28 @@ private: static thread_local bool IsMyThread; }; +struct ScopedEnableMsanInterceptorChecks { + ScopedEnableMsanInterceptorChecks() { + if (EF->__msan_scoped_enable_interceptor_checks) + EF->__msan_scoped_enable_interceptor_checks(); + } + ~ScopedEnableMsanInterceptorChecks() { + if (EF->__msan_scoped_disable_interceptor_checks) + EF->__msan_scoped_disable_interceptor_checks(); + } +}; + +struct ScopedDisableMsanInterceptorChecks { + ScopedDisableMsanInterceptorChecks() { + if (EF->__msan_scoped_disable_interceptor_checks) + EF->__msan_scoped_disable_interceptor_checks(); + } + ~ScopedDisableMsanInterceptorChecks() { + if (EF->__msan_scoped_enable_interceptor_checks) + EF->__msan_scoped_enable_interceptor_checks(); + } +}; + } // namespace fuzzer #endif // LLVM_FUZZER_INTERNAL_H diff --git a/lib/fuzzer/FuzzerLoop.cpp b/lib/fuzzer/FuzzerLoop.cpp index 5b451ca122d7..4bc88365a0b9 100644 --- a/lib/fuzzer/FuzzerLoop.cpp +++ b/lib/fuzzer/FuzzerLoop.cpp @@ -43,6 +43,8 @@ thread_local bool Fuzzer::IsMyThread; SharedMemoryRegion SMR; +bool RunningUserCallback = false; + // Only one Fuzzer per process. 
static Fuzzer *F; @@ -105,7 +107,7 @@ void MallocHook(const volatile void *ptr, size_t size) { return; Printf("MALLOC[%zd] %p %zd\n", N, ptr, size); if (TraceLevel >= 2 && EF) - EF->__sanitizer_print_stack_trace(); + PrintStackTrace(); } } @@ -118,7 +120,7 @@ void FreeHook(const volatile void *ptr) { return; Printf("FREE[%zd] %p\n", N, ptr); if (TraceLevel >= 2 && EF) - EF->__sanitizer_print_stack_trace(); + PrintStackTrace(); } } @@ -129,8 +131,7 @@ void Fuzzer::HandleMalloc(size_t Size) { Printf("==%d== ERROR: libFuzzer: out-of-memory (malloc(%zd))\n", GetPid(), Size); Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n"); - if (EF->__sanitizer_print_stack_trace) - EF->__sanitizer_print_stack_trace(); + PrintStackTrace(); DumpCurrentUnit("oom-"); Printf("SUMMARY: libFuzzer: out-of-memory\n"); PrintFinalStats(); @@ -149,8 +150,7 @@ Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD, if (Options.DetectLeaks && EF->__sanitizer_install_malloc_and_free_hooks) EF->__sanitizer_install_malloc_and_free_hooks(MallocHook, FreeHook); TPC.SetUseCounters(Options.UseCounters); - TPC.SetUseValueProfile(Options.UseValueProfile); - TPC.SetUseClangCoverage(Options.UseClangCoverage); + TPC.SetUseValueProfileMask(Options.UseValueProfile); if (Options.Verbosity) TPC.PrintModuleInfo(); @@ -161,6 +161,8 @@ Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD, AllocateCurrentUnitData(); CurrentUnitSize = 0; memset(BaseSha1, 0, sizeof(BaseSha1)); + TPC.SetFocusFunction(Options.FocusFunction); + DFT.Init(Options.DataFlowTrace, Options.FocusFunction); } Fuzzer::~Fuzzer() {} @@ -179,6 +181,7 @@ void Fuzzer::StaticDeathCallback() { void Fuzzer::DumpCurrentUnit(const char *Prefix) { if (!CurrentUnitData) return; // Happens when running individual inputs. 
+ ScopedDisableMsanInterceptorChecks S; MD.PrintMutationSequence(); Printf("; base unit: %s\n", Sha1ToString(BaseSha1).c_str()); size_t UnitSize = CurrentUnitSize; @@ -228,9 +231,10 @@ void Fuzzer::StaticFileSizeExceedCallback() { } void Fuzzer::CrashCallback() { + if (EF->__sanitizer_acquire_crash_state) + EF->__sanitizer_acquire_crash_state(); Printf("==%lu== ERROR: libFuzzer: deadly signal\n", GetPid()); - if (EF->__sanitizer_print_stack_trace) - EF->__sanitizer_print_stack_trace(); + PrintStackTrace(); Printf("NOTE: libFuzzer has rudimentary signal handlers.\n" " Combine libFuzzer with AddressSanitizer or similar for better " "crash reports.\n"); @@ -241,11 +245,13 @@ void Fuzzer::CrashCallback() { } void Fuzzer::ExitCallback() { - if (!RunningCB) + if (!RunningUserCallback) return; // This exit did not come from the user callback + if (EF->__sanitizer_acquire_crash_state && + !EF->__sanitizer_acquire_crash_state()) + return; Printf("==%lu== ERROR: libFuzzer: fuzz target exited\n", GetPid()); - if (EF->__sanitizer_print_stack_trace) - EF->__sanitizer_print_stack_trace(); + PrintStackTrace(); Printf("SUMMARY: libFuzzer: fuzz target exited\n"); DumpCurrentUnit("crash-"); PrintFinalStats(); @@ -273,7 +279,7 @@ void Fuzzer::AlarmCallback() { if (!InFuzzingThread()) return; #endif - if (!RunningCB) + if (!RunningUserCallback) return; // We have not started running units yet. 
size_t Seconds = duration_cast<seconds>(system_clock::now() - UnitStartTime).count(); @@ -282,14 +288,16 @@ void Fuzzer::AlarmCallback() { if (Options.Verbosity >= 2) Printf("AlarmCallback %zd\n", Seconds); if (Seconds >= (size_t)Options.UnitTimeoutSec) { + if (EF->__sanitizer_acquire_crash_state && + !EF->__sanitizer_acquire_crash_state()) + return; Printf("ALARM: working on the last Unit for %zd seconds\n", Seconds); Printf(" and the timeout value is %d (use -timeout=N to change)\n", Options.UnitTimeoutSec); DumpCurrentUnit("timeout-"); Printf("==%lu== ERROR: libFuzzer: timeout after %d seconds\n", GetPid(), Seconds); - if (EF->__sanitizer_print_stack_trace) - EF->__sanitizer_print_stack_trace(); + PrintStackTrace(); Printf("SUMMARY: libFuzzer: timeout\n"); PrintFinalStats(); _Exit(Options.TimeoutExitCode); // Stop right now. @@ -297,12 +305,14 @@ void Fuzzer::AlarmCallback() { } void Fuzzer::RssLimitCallback() { + if (EF->__sanitizer_acquire_crash_state && + !EF->__sanitizer_acquire_crash_state()) + return; Printf( "==%lu== ERROR: libFuzzer: out-of-memory (used: %zdMb; limit: %zdMb)\n", GetPid(), GetPeakRSSMb(), Options.RssLimitMb); Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n"); - if (EF->__sanitizer_print_memory_profile) - EF->__sanitizer_print_memory_profile(95, 8); + PrintMemoryProfile(); DumpCurrentUnit("oom-"); Printf("SUMMARY: libFuzzer: out-of-memory\n"); PrintFinalStats(); @@ -328,7 +338,11 @@ void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units) { else Printf("/%zdMb", N >> 20); } + if (size_t FF = Corpus.NumInputsThatTouchFocusFunction()) + Printf(" focus: %zd", FF); } + if (TmpMaxMutationLen) + Printf(" lim: %zd", TmpMaxMutationLen); if (Units) Printf(" units: %zd", Units); @@ -340,10 +354,13 @@ void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units) { void Fuzzer::PrintFinalStats() { if (Options.PrintCoverage) TPC.PrintCoverage(); + if (Options.PrintUnstableStats) + 
TPC.PrintUnstableStats(); if (Options.DumpCoverage) TPC.DumpCoverage(); if (Options.PrintCorpusStats) Corpus.PrintStats(); + if (Options.PrintMutationStats) MD.PrintMutationStats(); if (!Options.PrintFinalStats) return; size_t ExecPerSec = execPerSec(); @@ -432,6 +449,34 @@ void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) { } } +void Fuzzer::CheckForUnstableCounters(const uint8_t *Data, size_t Size) { + auto CBSetupAndRun = [&]() { + ScopedEnableMsanInterceptorChecks S; + UnitStartTime = system_clock::now(); + TPC.ResetMaps(); + RunningUserCallback = true; + CB(Data, Size); + RunningUserCallback = false; + UnitStopTime = system_clock::now(); + }; + + // Copy original run counters into our unstable counters + TPC.InitializeUnstableCounters(); + + // First Rerun + CBSetupAndRun(); + TPC.UpdateUnstableCounters(Options.HandleUnstable); + + // Second Rerun + CBSetupAndRun(); + TPC.UpdateUnstableCounters(Options.HandleUnstable); + + // Move minimum hit counts back to ModuleInline8bitCounters + if (Options.HandleUnstable == TracePC::MinUnstable || + Options.HandleUnstable == TracePC::ZeroUnstable) + TPC.ApplyUnstableCounters(); +} + bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile, InputInfo *II, bool *FoundUniqFeatures) { if (!Size) @@ -442,9 +487,18 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile, UniqFeatureSetTmp.clear(); size_t FoundUniqFeaturesOfII = 0; size_t NumUpdatesBefore = Corpus.NumFeatureUpdates(); + bool NewFeaturesUnstable = false; + + if (Options.HandleUnstable || Options.PrintUnstableStats) { + TPC.CollectFeatures([&](size_t Feature) { + if (Corpus.IsFeatureNew(Feature, Size, Options.Shrink)) + NewFeaturesUnstable = true; + }); + if (NewFeaturesUnstable) + CheckForUnstableCounters(Data, Size); + } + TPC.CollectFeatures([&](size_t Feature) { - if (Options.UseFeatureFrequency) - Corpus.UpdateFeatureFrequency(Feature); if (Corpus.AddFeature(Feature, Size, Options.Shrink)) 
UniqFeatureSetTmp.push_back(Feature); if (Options.ReduceInputs && II) @@ -452,17 +506,20 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile, II->UniqFeatureSet.end(), Feature)) FoundUniqFeaturesOfII++; }); + if (FoundUniqFeatures) *FoundUniqFeatures = FoundUniqFeaturesOfII; PrintPulseAndReportSlowInput(Data, Size); size_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore; + if (NumNewFeatures) { TPC.UpdateObservedPCs(); Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures, MayDeleteFile, - UniqFeatureSetTmp); + TPC.ObservedFocusFunction(), UniqFeatureSetTmp, DFT, II); return true; } if (II && FoundUniqFeaturesOfII && + II->DataFlowTraceForFocusFunction.empty() && FoundUniqFeaturesOfII == II->UniqFeatureSet.size() && II->U.size() > Size) { Corpus.Replace(II, {Data, Data + Size}); @@ -505,19 +562,24 @@ void Fuzzer::ExecuteCallback(const uint8_t *Data, size_t Size) { // so that we reliably find buffer overflows in it. uint8_t *DataCopy = new uint8_t[Size]; memcpy(DataCopy, Data, Size); + if (EF->__msan_unpoison) + EF->__msan_unpoison(DataCopy, Size); if (CurrentUnitData && CurrentUnitData != Data) memcpy(CurrentUnitData, Data, Size); CurrentUnitSize = Size; - AllocTracer.Start(Options.TraceMalloc); - UnitStartTime = system_clock::now(); - TPC.ResetMaps(); - RunningCB = true; - int Res = CB(DataCopy, Size); - RunningCB = false; - UnitStopTime = system_clock::now(); - (void)Res; - assert(Res == 0); - HasMoreMallocsThanFrees = AllocTracer.Stop(); + { + ScopedEnableMsanInterceptorChecks S; + AllocTracer.Start(Options.TraceMalloc); + UnitStartTime = system_clock::now(); + TPC.ResetMaps(); + RunningUserCallback = true; + int Res = CB(DataCopy, Size); + RunningUserCallback = false; + UnitStopTime = system_clock::now(); + (void)Res; + assert(Res == 0); + HasMoreMallocsThanFrees = AllocTracer.Stop(); + } if (!LooseMemeq(DataCopy, Data, Size)) CrashOnOverwrittenData(); CurrentUnitSize = 0; @@ -618,8 +680,6 @@ void 
Fuzzer::MutateAndTestOne() { MD.StartMutationSequence(); auto &II = Corpus.ChooseUnitToMutate(MD.GetRand()); - if (Options.UseFeatureFrequency) - Corpus.UpdateFeatureFrequencyScore(&II); const auto &U = II.U; memcpy(BaseSha1, II.Sha1, sizeof(BaseSha1)); assert(CurrentUnitData); @@ -638,7 +698,12 @@ void Fuzzer::MutateAndTestOne() { break; MaybeExitGracefully(); size_t NewSize = 0; - NewSize = MD.Mutate(CurrentUnitData, Size, CurrentMaxMutationLen); + if (II.HasFocusFunction && !II.DataFlowTraceForFocusFunction.empty() && + Size <= CurrentMaxMutationLen) + NewSize = MD.MutateWithMask(CurrentUnitData, Size, Size, + II.DataFlowTraceForFocusFunction); + else + NewSize = MD.Mutate(CurrentUnitData, Size, CurrentMaxMutationLen); assert(NewSize > 0 && "Mutator returned empty unit"); assert(NewSize <= CurrentMaxMutationLen && "Mutator return oversized unit"); Size = NewSize; @@ -728,7 +793,14 @@ void Fuzzer::ReadAndExecuteSeedCorpora(const Vector<std::string> &CorpusDirs) { } PrintStats("INITED"); - if (Corpus.empty()) { + if (!Options.FocusFunction.empty()) + Printf("INFO: %zd/%zd inputs touch the focus function\n", + Corpus.NumInputsThatTouchFocusFunction(), Corpus.size()); + if (!Options.DataFlowTrace.empty()) + Printf("INFO: %zd/%zd inputs have the Data Flow Trace\n", + Corpus.NumInputsWithDataFlowTrace(), Corpus.size()); + + if (Corpus.empty() && Options.MaxNumberOfRuns) { Printf("ERROR: no interesting inputs were found. " "Is the code instrumented for coverage? Exiting.\n"); exit(1); @@ -737,6 +809,7 @@ void Fuzzer::ReadAndExecuteSeedCorpora(const Vector<std::string> &CorpusDirs) { void Fuzzer::Loop(const Vector<std::string> &CorpusDirs) { ReadAndExecuteSeedCorpora(CorpusDirs); + DFT.Clear(); // No need for DFT any more. 
TPC.SetPrintNewPCs(Options.PrintNewCovPcs); TPC.SetPrintNewFuncs(Options.PrintNewCovFuncs); system_clock::time_point LastCorpusReload = system_clock::now(); @@ -755,16 +828,12 @@ void Fuzzer::Loop(const Vector<std::string> &CorpusDirs) { break; // Update TmpMaxMutationLen - if (Options.ExperimentalLenControl) { + if (Options.LenControl) { if (TmpMaxMutationLen < MaxMutationLen && TotalNumberOfRuns - LastCorpusUpdateRun > - Options.ExperimentalLenControl * Log(TmpMaxMutationLen)) { + Options.LenControl * Log(TmpMaxMutationLen)) { TmpMaxMutationLen = Min(MaxMutationLen, TmpMaxMutationLen + Log(TmpMaxMutationLen)); - if (TmpMaxMutationLen <= MaxMutationLen) - Printf("#%zd\tTEMP_MAX_LEN: %zd (%zd %zd)\n", TotalNumberOfRuns, - TmpMaxMutationLen, Options.ExperimentalLenControl, - LastCorpusUpdateRun); LastCorpusUpdateRun = TotalNumberOfRuns; } } else { @@ -826,13 +895,15 @@ void Fuzzer::AnnounceOutput(const uint8_t *Data, size_t Size) { extern "C" { -size_t LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize) { +__attribute__((visibility("default"))) size_t +LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize) { assert(fuzzer::F); return fuzzer::F->GetMD().DefaultMutate(Data, Size, MaxSize); } // Experimental -void LLVMFuzzerAnnounceOutput(const uint8_t *Data, size_t Size) { +__attribute__((visibility("default"))) void +LLVMFuzzerAnnounceOutput(const uint8_t *Data, size_t Size) { assert(fuzzer::F); fuzzer::F->AnnounceOutput(Data, Size); } diff --git a/lib/fuzzer/FuzzerMain.cpp b/lib/fuzzer/FuzzerMain.cpp index af8657200be2..f2c8e9c7bb11 100644 --- a/lib/fuzzer/FuzzerMain.cpp +++ b/lib/fuzzer/FuzzerMain.cpp @@ -16,6 +16,6 @@ extern "C" { int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); } // extern "C" -int main(int argc, char **argv) { +__attribute__((visibility("default"))) int main(int argc, char **argv) { return fuzzer::FuzzerDriver(&argc, &argv, LLVMFuzzerTestOneInput); } diff --git a/lib/fuzzer/FuzzerMutate.cpp 
b/lib/fuzzer/FuzzerMutate.cpp index 9ee5299f1b60..ff076cca683f 100644 --- a/lib/fuzzer/FuzzerMutate.cpp +++ b/lib/fuzzer/FuzzerMutate.cpp @@ -30,39 +30,41 @@ MutationDispatcher::MutationDispatcher(Random &Rand, DefaultMutators.insert( DefaultMutators.begin(), { - {&MutationDispatcher::Mutate_EraseBytes, "EraseBytes"}, - {&MutationDispatcher::Mutate_InsertByte, "InsertByte"}, + {&MutationDispatcher::Mutate_EraseBytes, "EraseBytes", 0, 0}, + {&MutationDispatcher::Mutate_InsertByte, "InsertByte", 0, 0}, {&MutationDispatcher::Mutate_InsertRepeatedBytes, - "InsertRepeatedBytes"}, - {&MutationDispatcher::Mutate_ChangeByte, "ChangeByte"}, - {&MutationDispatcher::Mutate_ChangeBit, "ChangeBit"}, - {&MutationDispatcher::Mutate_ShuffleBytes, "ShuffleBytes"}, - {&MutationDispatcher::Mutate_ChangeASCIIInteger, "ChangeASCIIInt"}, - {&MutationDispatcher::Mutate_ChangeBinaryInteger, "ChangeBinInt"}, - {&MutationDispatcher::Mutate_CopyPart, "CopyPart"}, - {&MutationDispatcher::Mutate_CrossOver, "CrossOver"}, + "InsertRepeatedBytes", 0, 0}, + {&MutationDispatcher::Mutate_ChangeByte, "ChangeByte", 0, 0}, + {&MutationDispatcher::Mutate_ChangeBit, "ChangeBit", 0, 0}, + {&MutationDispatcher::Mutate_ShuffleBytes, "ShuffleBytes", 0, 0}, + {&MutationDispatcher::Mutate_ChangeASCIIInteger, "ChangeASCIIInt", 0, + 0}, + {&MutationDispatcher::Mutate_ChangeBinaryInteger, "ChangeBinInt", 0, + 0}, + {&MutationDispatcher::Mutate_CopyPart, "CopyPart", 0, 0}, + {&MutationDispatcher::Mutate_CrossOver, "CrossOver", 0, 0}, {&MutationDispatcher::Mutate_AddWordFromManualDictionary, - "ManualDict"}, + "ManualDict", 0, 0}, {&MutationDispatcher::Mutate_AddWordFromPersistentAutoDictionary, - "PersAutoDict"}, + "PersAutoDict", 0, 0}, }); if(Options.UseCmp) DefaultMutators.push_back( - {&MutationDispatcher::Mutate_AddWordFromTORC, "CMP"}); + {&MutationDispatcher::Mutate_AddWordFromTORC, "CMP", 0, 0}); if (EF->LLVMFuzzerCustomMutator) - Mutators.push_back({&MutationDispatcher::Mutate_Custom, "Custom"}); + 
Mutators.push_back({&MutationDispatcher::Mutate_Custom, "Custom", 0, 0}); else Mutators = DefaultMutators; if (EF->LLVMFuzzerCustomCrossOver) Mutators.push_back( - {&MutationDispatcher::Mutate_CustomCrossOver, "CustomCrossOver"}); + {&MutationDispatcher::Mutate_CustomCrossOver, "CustomCrossOver", 0, 0}); } static char RandCh(Random &Rand) { if (Rand.RandBool()) return Rand(256); - const char *Special = "!*'();:@&=+$,/?%#[]012Az-`~.\xff\x00"; + const char Special[] = "!*'();:@&=+$,/?%#[]012Az-`~.\xff\x00"; return Special[Rand(sizeof(Special) - 1)]; } @@ -195,7 +197,6 @@ DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP( const void *Arg1Mutation, const void *Arg2Mutation, size_t ArgSize, const uint8_t *Data, size_t Size) { - ScopedDoingMyOwnMemOrStr scoped_doing_my_own_mem_os_str; bool HandleFirst = Rand.RandBool(); const void *ExistingBytes, *DesiredBytes; Word W; @@ -339,7 +340,9 @@ size_t MutationDispatcher::InsertPartOf(const uint8_t *From, size_t FromSize, size_t MutationDispatcher::Mutate_CopyPart(uint8_t *Data, size_t Size, size_t MaxSize) { if (Size > MaxSize || Size == 0) return 0; - if (Rand.RandBool()) + // If Size == MaxSize, `InsertPartOf(...)` will + // fail so there's no point using it in this case. 
+ if (Size == MaxSize || Rand.RandBool()) return CopyPartOf(Data, Size, Data, Size); else return InsertPartOf(Data, Size, Data, Size, MaxSize); @@ -463,6 +466,7 @@ void MutationDispatcher::RecordSuccessfulMutationSequence() { if (!PersistentAutoDictionary.ContainsWord(DE->GetW())) PersistentAutoDictionary.push_back({DE->GetW(), 1}); } + RecordUsefulMutations(); } void MutationDispatcher::PrintRecommendedDictionary() { @@ -483,8 +487,7 @@ void MutationDispatcher::PrintRecommendedDictionary() { void MutationDispatcher::PrintMutationSequence() { Printf("MS: %zd ", CurrentMutatorSequence.size()); - for (auto M : CurrentMutatorSequence) - Printf("%s-", M.Name); + for (auto M : CurrentMutatorSequence) Printf("%s-", M->Name); if (!CurrentDictionaryEntrySequence.empty()) { Printf(" DE: "); for (auto DE : CurrentDictionaryEntrySequence) { @@ -512,12 +515,13 @@ size_t MutationDispatcher::MutateImpl(uint8_t *Data, size_t Size, // in which case they will return 0. // Try several times before returning un-mutated data. for (int Iter = 0; Iter < 100; Iter++) { - auto M = Mutators[Rand(Mutators.size())]; - size_t NewSize = (this->*(M.Fn))(Data, Size, MaxSize); + auto M = &Mutators[Rand(Mutators.size())]; + size_t NewSize = (this->*(M->Fn))(Data, Size, MaxSize); if (NewSize && NewSize <= MaxSize) { if (Options.OnlyASCII) ToASCII(Data, NewSize); CurrentMutatorSequence.push_back(M); + M->TotalCount++; return NewSize; } } @@ -525,9 +529,54 @@ size_t MutationDispatcher::MutateImpl(uint8_t *Data, size_t Size, return 1; // Fallback, should not happen frequently. } +// Mask represents the set of Data bytes that are worth mutating. +size_t MutationDispatcher::MutateWithMask(uint8_t *Data, size_t Size, + size_t MaxSize, + const Vector<uint8_t> &Mask) { + assert(Size <= Mask.size()); + // * Copy the worthy bytes into a temporary array T + // * Mutate T + // * Copy T back. + // This is totally unoptimized. 
+ auto &T = MutateWithMaskTemp; + if (T.size() < Size) + T.resize(Size); + size_t OneBits = 0; + for (size_t I = 0; I < Size; I++) + if (Mask[I]) + T[OneBits++] = Data[I]; + + assert(!T.empty()); + size_t NewSize = Mutate(T.data(), OneBits, OneBits); + assert(NewSize <= OneBits); + (void)NewSize; + // Even if NewSize < OneBits we still use all OneBits bytes. + for (size_t I = 0, J = 0; I < Size; I++) + if (Mask[I]) + Data[I] = T[J++]; + return Size; +} + void MutationDispatcher::AddWordToManualDictionary(const Word &W) { ManualDictionary.push_back( {W, std::numeric_limits<size_t>::max()}); } +void MutationDispatcher::RecordUsefulMutations() { + for (auto M : CurrentMutatorSequence) M->UsefulCount++; +} + +void MutationDispatcher::PrintMutationStats() { + Printf("\nstat::mutation_usefulness: "); + for (size_t i = 0; i < Mutators.size(); i++) { + double UsefulPercentage = + Mutators[i].TotalCount + ? (100.0 * Mutators[i].UsefulCount) / Mutators[i].TotalCount + : 0; + Printf("%.3f", UsefulPercentage); + if (i < Mutators.size() - 1) Printf(","); + } + Printf("\n"); +} + } // namespace fuzzer diff --git a/lib/fuzzer/FuzzerMutate.h b/lib/fuzzer/FuzzerMutate.h index 4aa58af9902d..828ecc13d866 100644 --- a/lib/fuzzer/FuzzerMutate.h +++ b/lib/fuzzer/FuzzerMutate.h @@ -27,7 +27,7 @@ public: void StartMutationSequence(); /// Print the current sequence of mutations. void PrintMutationSequence(); - /// Indicate that the current sequence of mutations was successfull. + /// Indicate that the current sequence of mutations was successful. void RecordSuccessfulMutationSequence(); /// Mutates data by invoking user-provided mutator. size_t Mutate_Custom(uint8_t *Data, size_t Size, size_t MaxSize); @@ -70,6 +70,13 @@ public: /// Applies one of the configured mutations. /// Returns the new size of data which could be up to MaxSize. size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize); + + /// Applies one of the configured mutations to the bytes of Data + /// that have '1' in Mask. 
+ /// Mask.size() should be >= Size. + size_t MutateWithMask(uint8_t *Data, size_t Size, size_t MaxSize, + const Vector<uint8_t> &Mask); + /// Applies one of the default mutations. Provided as a service /// to mutation authors. size_t DefaultMutate(uint8_t *Data, size_t Size, size_t MaxSize); @@ -86,11 +93,16 @@ public: Random &GetRand() { return Rand; } -private: + void PrintMutationStats(); + + void RecordUsefulMutations(); + private: struct Mutator { size_t (MutationDispatcher::*Fn)(uint8_t *Data, size_t Size, size_t Max); const char *Name; + uint64_t UsefulCount; + uint64_t TotalCount; }; size_t AddWordFromDictionary(Dictionary &D, uint8_t *Data, size_t Size, @@ -125,11 +137,11 @@ private: // recreated periodically. Dictionary TempAutoDictionary; // Persistent dictionary modified by the fuzzer, consists of - // entries that led to successfull discoveries in the past mutations. + // entries that led to successful discoveries in the past mutations. Dictionary PersistentAutoDictionary; - Vector<Mutator> CurrentMutatorSequence; Vector<DictionaryEntry *> CurrentDictionaryEntrySequence; + Vector<Mutator *> CurrentMutatorSequence; static const size_t kCmpDictionaryEntriesDequeSize = 16; DictionaryEntry CmpDictionaryEntriesDeque[kCmpDictionaryEntriesDequeSize]; @@ -137,6 +149,7 @@ private: const InputCorpus *Corpus = nullptr; Vector<uint8_t> MutateInPlaceHere; + Vector<uint8_t> MutateWithMaskTemp; // CustomCrossOver needs its own buffer as a custom implementation may call // LLVMFuzzerMutate, which in turn may resize MutateInPlaceHere. 
Vector<uint8_t> CustomCrossOverInPlaceHere; diff --git a/lib/fuzzer/FuzzerOptions.h b/lib/fuzzer/FuzzerOptions.h index 15a378020b85..ce39c0876cd7 100644 --- a/lib/fuzzer/FuzzerOptions.h +++ b/lib/fuzzer/FuzzerOptions.h @@ -18,7 +18,7 @@ namespace fuzzer { struct FuzzingOptions { int Verbosity = 1; size_t MaxLen = 0; - size_t ExperimentalLenControl = 0; + size_t LenControl = 1000; int UnitTimeoutSec = 300; int TimeoutExitCode = 77; int ErrorExitCode = 77; @@ -31,7 +31,7 @@ struct FuzzingOptions { bool UseCounters = false; bool UseMemmem = true; bool UseCmp = false; - bool UseValueProfile = false; + int UseValueProfile = false; bool Shrink = false; bool ReduceInputs = false; int ReloadIntervalSec = 1; @@ -45,18 +45,21 @@ struct FuzzingOptions { std::string ExactArtifactPath; std::string ExitOnSrcPos; std::string ExitOnItem; + std::string FocusFunction; + std::string DataFlowTrace; bool SaveArtifacts = true; bool PrintNEW = true; // Print a status line when new units are found; bool PrintNewCovPcs = false; int PrintNewCovFuncs = 0; bool PrintFinalStats = false; + bool PrintMutationStats = false; bool PrintCorpusStats = false; bool PrintCoverage = false; + bool PrintUnstableStats = false; + int HandleUnstable = 0; bool DumpCoverage = false; - bool UseClangCoverage = false; bool DetectLeaks = true; int PurgeAllocatorIntervalSec = 1; - int UseFeatureFrequency = false; int TraceMalloc = 0; bool HandleAbrt = false; bool HandleBus = false; diff --git a/lib/fuzzer/FuzzerShmemPosix.cpp b/lib/fuzzer/FuzzerShmemPosix.cpp index 50cdcfb509dc..41a93f61004b 100644 --- a/lib/fuzzer/FuzzerShmemPosix.cpp +++ b/lib/fuzzer/FuzzerShmemPosix.cpp @@ -32,6 +32,11 @@ std::string SharedMemoryRegion::Path(const char *Name) { std::string SharedMemoryRegion::SemName(const char *Name, int Idx) { std::string Res(Name); + // When passing a name without a leading <slash> character to + // sem_open, the behaviour is unspecified in POSIX. 
Add a leading + // <slash> character for the name if there is no such one. + if (!Res.empty() && Res[0] != '/') + Res.insert(Res.begin(), '/'); return Res + (char)('0' + Idx); } @@ -52,7 +57,7 @@ bool SharedMemoryRegion::Create(const char *Name) { for (int i = 0; i < 2; i++) { sem_unlink(SemName(Name, i).c_str()); Semaphore[i] = sem_open(SemName(Name, i).c_str(), O_CREAT, 0644, 0); - if (Semaphore[i] == (void *)-1) + if (Semaphore[i] == SEM_FAILED) return false; } IAmServer = true; @@ -70,7 +75,7 @@ bool SharedMemoryRegion::Open(const char *Name) { return false; for (int i = 0; i < 2; i++) { Semaphore[i] = sem_open(SemName(Name, i).c_str(), 0); - if (Semaphore[i] == (void *)-1) + if (Semaphore[i] == SEM_FAILED) return false; } IAmServer = false; diff --git a/lib/fuzzer/FuzzerTracePC.cpp b/lib/fuzzer/FuzzerTracePC.cpp index 5e9f9f2f6dcc..29ffc8e34fc0 100644 --- a/lib/fuzzer/FuzzerTracePC.cpp +++ b/lib/fuzzer/FuzzerTracePC.cpp @@ -39,8 +39,6 @@ namespace fuzzer { TracePC TPC; -int ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr; - uint8_t *TracePC::Counters() const { return __sancov_trace_pc_guard_8bit_counters; } @@ -59,6 +57,49 @@ size_t TracePC::GetTotalPCCoverage() { return Res; } +template<class CallBack> +void TracePC::IterateInline8bitCounters(CallBack CB) const { + if (NumInline8bitCounters && NumInline8bitCounters == NumPCsInPCTables) { + size_t CounterIdx = 0; + for (size_t i = 0; i < NumModulesWithInline8bitCounters; i++) { + uint8_t *Beg = ModuleCounters[i].Start; + size_t Size = ModuleCounters[i].Stop - Beg; + assert(Size == (size_t)(ModulePCTable[i].Stop - ModulePCTable[i].Start)); + for (size_t j = 0; j < Size; j++, CounterIdx++) + CB(i, j, CounterIdx); + } + } +} + +// Initializes unstable counters by copying Inline8bitCounters to unstable +// counters. 
+void TracePC::InitializeUnstableCounters() { + IterateInline8bitCounters([&](int i, int j, int UnstableIdx) { + UnstableCounters[UnstableIdx].Counter = ModuleCounters[i].Start[j]; + }); +} + +// Compares the current counters with counters from previous runs +// and records differences as unstable edges. +void TracePC::UpdateUnstableCounters(int UnstableMode) { + IterateInline8bitCounters([&](int i, int j, int UnstableIdx) { + if (ModuleCounters[i].Start[j] != UnstableCounters[UnstableIdx].Counter) { + UnstableCounters[UnstableIdx].IsUnstable = true; + if (UnstableMode == ZeroUnstable) + UnstableCounters[UnstableIdx].Counter = 0; + else if (UnstableMode == MinUnstable) + UnstableCounters[UnstableIdx].Counter = std::min( + ModuleCounters[i].Start[j], UnstableCounters[UnstableIdx].Counter); + } + }); +} + +// Moves the minimum hit counts to ModuleCounters. +void TracePC::ApplyUnstableCounters() { + IterateInline8bitCounters([&](int i, int j, int UnstableIdx) { + ModuleCounters[i].Start[j] = UnstableCounters[UnstableIdx].Counter; + }); +} void TracePC::HandleInline8bitCountersInit(uint8_t *Start, uint8_t *Stop) { if (Start == Stop) return; @@ -132,8 +173,8 @@ void TracePC::PrintModuleInfo() { _Exit(1); } } - if (size_t NumClangCounters = ClangCountersEnd() - ClangCountersBegin()) - Printf("INFO: %zd Clang Coverage Counters\n", NumClangCounters); + if (size_t NumExtraCounters = ExtraCountersEnd() - ExtraCountersBegin()) + Printf("INFO: %zd Extra Counters\n", NumExtraCounters); } ATTRIBUTE_NO_SANITIZE_ALL @@ -147,28 +188,25 @@ void TracePC::HandleCallerCallee(uintptr_t Caller, uintptr_t Callee) { void TracePC::UpdateObservedPCs() { Vector<uintptr_t> CoveredFuncs; auto ObservePC = [&](uintptr_t PC) { - if (ObservedPCs.insert(PC).second && DoPrintNewPCs) - PrintPC("\tNEW_PC: %p %F %L\n", "\tNEW_PC: %p\n", PC + 1); + if (ObservedPCs.insert(PC).second && DoPrintNewPCs) { + PrintPC("\tNEW_PC: %p %F %L", "\tNEW_PC: %p", PC + 1); + Printf("\n"); + } }; auto Observe = [&](const 
PCTableEntry &TE) { if (TE.PCFlags & 1) - if (ObservedFuncs.insert(TE.PC).second && NumPrintNewFuncs) + if (++ObservedFuncs[TE.PC] == 1 && NumPrintNewFuncs) CoveredFuncs.push_back(TE.PC); ObservePC(TE.PC); }; if (NumPCsInPCTables) { if (NumInline8bitCounters == NumPCsInPCTables) { - for (size_t i = 0; i < NumModulesWithInline8bitCounters; i++) { - uint8_t *Beg = ModuleCounters[i].Start; - size_t Size = ModuleCounters[i].Stop - Beg; - assert(Size == - (size_t)(ModulePCTable[i].Stop - ModulePCTable[i].Start)); - for (size_t j = 0; j < Size; j++) - if (Beg[j]) - Observe(ModulePCTable[i].Start[j]); - } + IterateInline8bitCounters([&](int i, int j, int CounterIdx) { + if (ModuleCounters[i].Start[j]) + Observe(ModulePCTable[i].Start[j]); + }); } else if (NumGuards == NumPCsInPCTables) { size_t GuardIdx = 1; for (size_t i = 0; i < NumModules; i++) { @@ -182,17 +220,12 @@ void TracePC::UpdateObservedPCs() { } } } - if (size_t NumClangCounters = - ClangCountersEnd() - ClangCountersBegin()) { - auto P = ClangCountersBegin(); - for (size_t Idx = 0; Idx < NumClangCounters; Idx++) - if (P[Idx]) - ObservePC((uintptr_t)Idx); - } - for (size_t i = 0, N = Min(CoveredFuncs.size(), NumPrintNewFuncs); i < N; i++) { - Printf("\tNEW_FUNC[%zd/%zd]: ", i, CoveredFuncs.size()); - PrintPC("%p %F %L\n", "%p\n", CoveredFuncs[i] + 1); + for (size_t i = 0, N = Min(CoveredFuncs.size(), NumPrintNewFuncs); i < N; + i++) { + Printf("\tNEW_FUNC[%zd/%zd]: ", i + 1, CoveredFuncs.size()); + PrintPC("%p %F %L", "%p", CoveredFuncs[i] + 1); + Printf("\n"); } } @@ -218,6 +251,57 @@ static std::string GetModuleName(uintptr_t PC) { return ModulePathRaw; } +template<class CallBack> +void TracePC::IterateCoveredFunctions(CallBack CB) { + for (size_t i = 0; i < NumPCTables; i++) { + auto &M = ModulePCTable[i]; + assert(M.Start < M.Stop); + auto ModuleName = GetModuleName(M.Start->PC); + for (auto NextFE = M.Start; NextFE < M.Stop; ) { + auto FE = NextFE; + assert((FE->PCFlags & 1) && "Not a function entry 
point"); + do { + NextFE++; + } while (NextFE < M.Stop && !(NextFE->PCFlags & 1)); + if (ObservedFuncs.count(FE->PC)) + CB(FE, NextFE, ObservedFuncs[FE->PC]); + } + } +} + +void TracePC::SetFocusFunction(const std::string &FuncName) { + // This function should be called once. + assert(FocusFunction.first > NumModulesWithInline8bitCounters); + if (FuncName.empty()) + return; + for (size_t M = 0; M < NumModulesWithInline8bitCounters; M++) { + auto &PCTE = ModulePCTable[M]; + size_t N = PCTE.Stop - PCTE.Start; + for (size_t I = 0; I < N; I++) { + if (!(PCTE.Start[I].PCFlags & 1)) continue; // not a function entry. + auto Name = DescribePC("%F", GetNextInstructionPc(PCTE.Start[I].PC)); + if (Name[0] == 'i' && Name[1] == 'n' && Name[2] == ' ') + Name = Name.substr(3, std::string::npos); + if (FuncName != Name) continue; + Printf("INFO: Focus function is set to '%s'\n", Name.c_str()); + FocusFunction = {M, I}; + return; + } + } +} + +bool TracePC::ObservedFocusFunction() { + size_t I = FocusFunction.first; + size_t J = FocusFunction.second; + if (I >= NumModulesWithInline8bitCounters) + return false; + auto &MC = ModuleCounters[I]; + size_t Size = MC.Stop - MC.Start; + if (J >= Size) + return false; + return MC.Start[J] != 0; +} + void TracePC::PrintCoverage() { if (!EF->__sanitizer_symbolize_pc || !EF->__sanitizer_get_module_and_offset_for_pc) { @@ -227,53 +311,33 @@ void TracePC::PrintCoverage() { return; } Printf("COVERAGE:\n"); - std::string LastFunctionName = ""; - std::string LastFileStr = ""; - Set<size_t> UncoveredLines; - Set<size_t> CoveredLines; - - auto FunctionEndCallback = [&](const std::string &CurrentFunc, - const std::string &CurrentFile) { - if (LastFunctionName != CurrentFunc) { - if (CoveredLines.empty() && !UncoveredLines.empty()) { - Printf("UNCOVERED_FUNC: %s\n", LastFunctionName.c_str()); - } else { - for (auto Line : UncoveredLines) { - if (!CoveredLines.count(Line)) - Printf("UNCOVERED_LINE: %s %s:%zd\n", LastFunctionName.c_str(), - 
LastFileStr.c_str(), Line); - } - } - - UncoveredLines.clear(); - CoveredLines.clear(); - LastFunctionName = CurrentFunc; - LastFileStr = CurrentFile; - } + auto CoveredFunctionCallback = [&](const PCTableEntry *First, + const PCTableEntry *Last, + uintptr_t Counter) { + assert(First < Last); + auto VisualizePC = GetNextInstructionPc(First->PC); + std::string FileStr = DescribePC("%s", VisualizePC); + if (!IsInterestingCoverageFile(FileStr)) + return; + std::string FunctionStr = DescribePC("%F", VisualizePC); + if (FunctionStr.find("in ") == 0) + FunctionStr = FunctionStr.substr(3); + std::string LineStr = DescribePC("%l", VisualizePC); + size_t Line = std::stoul(LineStr); + size_t NumEdges = Last - First; + Vector<uintptr_t> UncoveredPCs; + for (auto TE = First; TE < Last; TE++) + if (!ObservedPCs.count(TE->PC)) + UncoveredPCs.push_back(TE->PC); + Printf("COVERED_FUNC: hits: %zd", Counter); + Printf(" edges: %zd/%zd", NumEdges - UncoveredPCs.size(), NumEdges); + Printf(" %s %s:%zd\n", FunctionStr.c_str(), FileStr.c_str(), Line); + for (auto PC: UncoveredPCs) + Printf(" UNCOVERED_PC: %s\n", + DescribePC("%s:%l", GetNextInstructionPc(PC)).c_str()); }; - for (size_t i = 0; i < NumPCTables; i++) { - auto &M = ModulePCTable[i]; - assert(M.Start < M.Stop); - auto ModuleName = GetModuleName(M.Start->PC); - for (auto Ptr = M.Start; Ptr < M.Stop; Ptr++) { - auto PC = Ptr->PC; - auto VisualizePC = GetNextInstructionPc(PC); - bool IsObserved = ObservedPCs.count(PC); - std::string FileStr = DescribePC("%s", VisualizePC); - if (!IsInterestingCoverageFile(FileStr)) continue; - std::string FunctionStr = DescribePC("%F", VisualizePC); - FunctionEndCallback(FunctionStr, FileStr); - std::string LineStr = DescribePC("%l", VisualizePC); - size_t Line = std::stoul(LineStr); - if (IsObserved && CoveredLines.insert(Line).second) - Printf("COVERED: %s %s:%zd\n", FunctionStr.c_str(), FileStr.c_str(), - Line); - else - UncoveredLines.insert(Line); - } - } - FunctionEndCallback("", ""); + 
IterateCoveredFunctions(CoveredFunctionCallback); } void TracePC::DumpCoverage() { @@ -285,6 +349,15 @@ void TracePC::DumpCoverage() { } } +void TracePC::PrintUnstableStats() { + size_t count = 0; + for (size_t i = 0; i < NumInline8bitCounters; i++) + if (UnstableCounters[i].IsUnstable) + count++; + Printf("stat::stability_rate: %.2f\n", + 100 - static_cast<float>(count * 100) / NumInline8bitCounters); +} + // Value profile. // We keep track of various values that affect control flow. // These values are inserted into a bit-set-based hash map. @@ -334,7 +407,14 @@ void TracePC::HandleCmp(uintptr_t PC, T Arg1, T Arg2) { TORC4.Insert(ArgXor, Arg1, Arg2); else if (sizeof(T) == 8) TORC8.Insert(ArgXor, Arg1, Arg2); - ValueProfileMap.AddValue(Idx); + // TODO: remove these flags and instead use all metrics at once. + if (UseValueProfileMask & 1) + ValueProfileMap.AddValue(Idx); + if (UseValueProfileMask & 2) + ValueProfileMap.AddValue( + PC * 64 + (Arg1 == Arg2 ? 0 : __builtin_clzll(Arg1 - Arg2) + 1)); + if (UseValueProfileMask & 4) // alternative way to use the hamming distance + ValueProfileMap.AddValue(PC * 64 + ArgDistance); } static size_t InternalStrnlen(const char *S, size_t MaxLen) { @@ -536,7 +616,7 @@ void __sanitizer_cov_trace_gep(uintptr_t Idx) { ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2, size_t n, int result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; if (result == 0) return; // No reason to mutate. if (n <= 1) return; // Not interesting. 
fuzzer::TPC.AddValueForMemcmp(caller_pc, s1, s2, n, /*StopAtZero*/false); @@ -545,7 +625,7 @@ void __sanitizer_weak_hook_memcmp(void *caller_pc, const void *s1, ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2, size_t n, int result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; if (result == 0) return; // No reason to mutate. size_t Len1 = fuzzer::InternalStrnlen(s1, n); size_t Len2 = fuzzer::InternalStrnlen(s2, n); @@ -558,7 +638,7 @@ void __sanitizer_weak_hook_strncmp(void *caller_pc, const char *s1, ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_strcmp(void *caller_pc, const char *s1, const char *s2, int result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; if (result == 0) return; // No reason to mutate. size_t N = fuzzer::InternalStrnlen2(s1, s2); if (N <= 1) return; // Not interesting. 
@@ -568,35 +648,35 @@ void __sanitizer_weak_hook_strcmp(void *caller_pc, const char *s1, ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1, const char *s2, size_t n, int result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; return __sanitizer_weak_hook_strncmp(called_pc, s1, s2, n, result); } ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1, const char *s2, int result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; return __sanitizer_weak_hook_strcmp(called_pc, s1, s2, result); } ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1, const char *s2, char *result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; fuzzer::TPC.MMT.Add(reinterpret_cast<const uint8_t *>(s2), strlen(s2)); } ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1, const char *s2, char *result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; fuzzer::TPC.MMT.Add(reinterpret_cast<const uint8_t *>(s2), strlen(s2)); } ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY void __sanitizer_weak_hook_memmem(void *called_pc, const void *s1, size_t len1, const void *s2, size_t len2, void *result) { - if (fuzzer::ScopedDoingMyOwnMemOrStr::DoingMyOwnMemOrStr) return; + if (!fuzzer::RunningUserCallback) return; fuzzer::TPC.MMT.Add(reinterpret_cast<const uint8_t *>(s2), len2); } } // extern "C" diff --git a/lib/fuzzer/FuzzerTracePC.h b/lib/fuzzer/FuzzerTracePC.h index c3f241b905b1..097ba69bdc08 100644 --- a/lib/fuzzer/FuzzerTracePC.h +++ b/lib/fuzzer/FuzzerTracePC.h @@ -17,6 +17,7 @@ 
#include "FuzzerValueBitMap.h" #include <set> +#include <unordered_map> namespace fuzzer { @@ -73,6 +74,11 @@ class TracePC { // How many bits of PC are used from __sanitizer_cov_trace_pc. static const size_t kTracePcBits = 18; + enum HandleUnstableOptions { + MinUnstable = 1, + ZeroUnstable = 2, + }; + void HandleInit(uint32_t *Start, uint32_t *Stop); void HandleInline8bitCountersInit(uint8_t *Start, uint8_t *Stop); void HandlePCsInit(const uintptr_t *Start, const uintptr_t *Stop); @@ -80,8 +86,7 @@ class TracePC { template <class T> void HandleCmp(uintptr_t PC, T Arg1, T Arg2); size_t GetTotalPCCoverage(); void SetUseCounters(bool UC) { UseCounters = UC; } - void SetUseClangCoverage(bool UCC) { UseClangCoverage = UCC; } - void SetUseValueProfile(bool VP) { UseValueProfile = VP; } + void SetUseValueProfileMask(uint32_t VPMask) { UseValueProfileMask = VPMask; } void SetPrintNewPCs(bool P) { DoPrintNewPCs = P; } void SetPrintNewFuncs(size_t P) { NumPrintNewFuncs = P; } void UpdateObservedPCs(); @@ -93,8 +98,6 @@ class TracePC { memset(Counters(), 0, GetNumPCs()); ClearExtraCounters(); ClearInlineCounters(); - if (UseClangCoverage) - ClearClangCounters(); } void ClearInlineCounters(); @@ -106,6 +109,10 @@ class TracePC { void PrintCoverage(); void DumpCoverage(); + void PrintUnstableStats(); + + template<class CallBack> + void IterateCoveredFunctions(CallBack CB); void AddValueForMemcmp(void *caller_pc, const void *s1, const void *s2, size_t n, bool StopAtZero); @@ -132,10 +139,23 @@ class TracePC { CB(PC); } + void SetFocusFunction(const std::string &FuncName); + bool ObservedFocusFunction(); + + void InitializeUnstableCounters(); + void UpdateUnstableCounters(int UnstableMode); + void ApplyUnstableCounters(); + private: + struct UnstableEdge { + uint8_t Counter; + bool IsUnstable; + }; + + UnstableEdge UnstableCounters[kNumPCs]; + bool UseCounters = false; - bool UseValueProfile = false; - bool UseClangCoverage = false; + uint32_t UseValueProfileMask = false; bool 
DoPrintNewPCs = false; size_t NumPrintNewFuncs = 0; @@ -163,7 +183,12 @@ private: uintptr_t *PCs() const; Set<uintptr_t> ObservedPCs; - Set<uintptr_t> ObservedFuncs; + std::unordered_map<uintptr_t, uintptr_t> ObservedFuncs; // PC => Counter. + + template <class Callback> + void IterateInline8bitCounters(Callback CB) const; + + std::pair<size_t, size_t> FocusFunction = {-1, -1}; // Module and PC IDs. ValueBitMap ValueProfileMap; uintptr_t InitialStack; @@ -251,23 +276,11 @@ void TracePC::CollectFeatures(Callback HandleFeature) const { } } - if (size_t NumClangCounters = ClangCountersEnd() - ClangCountersBegin()) { - auto P = ClangCountersBegin(); - for (size_t Idx = 0; Idx < NumClangCounters; Idx++) - if (auto Cnt = P[Idx]) { - if (UseCounters) - HandleFeature(FirstFeature + Idx * 8 + CounterToFeature(Cnt)); - else - HandleFeature(FirstFeature + Idx); - } - FirstFeature += NumClangCounters; - } - ForEachNonZeroByte(ExtraCountersBegin(), ExtraCountersEnd(), FirstFeature, Handle8bitCounter); FirstFeature += (ExtraCountersEnd() - ExtraCountersBegin()) * 8; - if (UseValueProfile) { + if (UseValueProfileMask) { ValueProfileMap.ForEach([&](size_t Idx) { HandleFeature(FirstFeature + Idx); }); diff --git a/lib/fuzzer/FuzzerUtil.cpp b/lib/fuzzer/FuzzerUtil.cpp index 96b37d34815d..6286f9a718ad 100644 --- a/lib/fuzzer/FuzzerUtil.cpp +++ b/lib/fuzzer/FuzzerUtil.cpp @@ -16,6 +16,7 @@ #include <chrono> #include <cstring> #include <errno.h> +#include <mutex> #include <signal.h> #include <sstream> #include <stdio.h> @@ -179,8 +180,12 @@ std::string Base64(const Unit &U) { return Res; } +static std::mutex SymbolizeMutex; + std::string DescribePC(const char *SymbolizedFMT, uintptr_t PC) { - if (!EF->__sanitizer_symbolize_pc) return "<can not symbolize>"; + std::unique_lock<std::mutex> l(SymbolizeMutex, std::try_to_lock); + if (!EF->__sanitizer_symbolize_pc || !l.owns_lock()) + return "<can not symbolize>"; char PcDescr[1024] = {}; 
EF->__sanitizer_symbolize_pc(reinterpret_cast<void*>(PC), SymbolizedFMT, PcDescr, sizeof(PcDescr)); @@ -195,6 +200,18 @@ void PrintPC(const char *SymbolizedFMT, const char *FallbackFMT, uintptr_t PC) { Printf(FallbackFMT, PC); } +void PrintStackTrace() { + std::unique_lock<std::mutex> l(SymbolizeMutex, std::try_to_lock); + if (EF->__sanitizer_print_stack_trace && l.owns_lock()) + EF->__sanitizer_print_stack_trace(); +} + +void PrintMemoryProfile() { + std::unique_lock<std::mutex> l(SymbolizeMutex, std::try_to_lock); + if (EF->__sanitizer_print_memory_profile && l.owns_lock()) + EF->__sanitizer_print_memory_profile(95, 8); +} + unsigned NumberOfCpuCores() { unsigned N = std::thread::hardware_concurrency(); if (!N) { diff --git a/lib/fuzzer/FuzzerUtil.h b/lib/fuzzer/FuzzerUtil.h index f2ed028ce78e..8c5c57c3ab83 100644 --- a/lib/fuzzer/FuzzerUtil.h +++ b/lib/fuzzer/FuzzerUtil.h @@ -40,6 +40,10 @@ void PrintPC(const char *SymbolizedFMT, const char *FallbackFMT, uintptr_t PC); std::string DescribePC(const char *SymbolizedFMT, uintptr_t PC); +void PrintStackTrace(); + +void PrintMemoryProfile(); + unsigned NumberOfCpuCores(); // Platform specific functions. 
diff --git a/lib/fuzzer/FuzzerUtilFuchsia.cpp b/lib/fuzzer/FuzzerUtilFuchsia.cpp index a5fdc37fb5a6..cd2bb7438e9d 100644 --- a/lib/fuzzer/FuzzerUtilFuchsia.cpp +++ b/lib/fuzzer/FuzzerUtilFuchsia.cpp @@ -17,29 +17,54 @@ #include <cerrno> #include <cinttypes> #include <cstdint> -#include <fbl/unique_fd.h> #include <fcntl.h> -#include <launchpad/launchpad.h> +#include <lib/fdio/spawn.h> #include <string> +#include <sys/select.h> #include <thread> +#include <unistd.h> #include <zircon/errors.h> +#include <zircon/process.h> +#include <zircon/sanitizer.h> #include <zircon/status.h> #include <zircon/syscalls.h> +#include <zircon/syscalls/debug.h> +#include <zircon/syscalls/exception.h> #include <zircon/syscalls/port.h> #include <zircon/types.h> -#include <zx/object.h> -#include <zx/port.h> -#include <zx/process.h> -#include <zx/time.h> namespace fuzzer { +// Given that Fuchsia doesn't have the POSIX signals that libFuzzer was written +// around, the general approach is to spin up dedicated threads to watch for +// each requested condition (alarm, interrupt, crash). Of these, the crash +// handler is the most involved, as it requires resuming the crashed thread in +// order to invoke the sanitizers to get the needed state. + +// Forward declaration of assembly trampoline needed to resume crashed threads. +// This appears to have external linkage to C++, which is why it's not in the +// anonymous namespace. The assembly definition inside MakeTrampoline() +// actually defines the symbol with internal linkage only. +void CrashTrampolineAsm() __asm__("CrashTrampolineAsm"); + namespace { +// TODO(phosek): remove this and replace it with ZX_TIME_INFINITE +#define ZX_TIME_INFINITE_OLD INT64_MAX + // A magic value for the Zircon exception port, chosen to spell 'FUZZING' // when interpreted as a byte sequence on little-endian platforms. const uint64_t kFuzzingCrash = 0x474e495a5a5546; +// Helper function to handle Zircon syscall failures. 
+void ExitOnErr(zx_status_t Status, const char *Syscall) { + if (Status != ZX_OK) { + Printf("libFuzzer: %s failed: %s\n", Syscall, + _zx_status_get_string(Status)); + exit(1); + } +} + void AlarmHandler(int Seconds) { while (true) { SleepSeconds(Seconds); @@ -48,37 +73,226 @@ void AlarmHandler(int Seconds) { } void InterruptHandler() { + fd_set readfds; // Ctrl-C sends ETX in Zircon. - while (getchar() != 0x03); + do { + FD_ZERO(&readfds); + FD_SET(STDIN_FILENO, &readfds); + select(STDIN_FILENO + 1, &readfds, nullptr, nullptr, nullptr); + } while(!FD_ISSET(STDIN_FILENO, &readfds) || getchar() != 0x03); Fuzzer::StaticInterruptCallback(); } -void CrashHandler(zx::port *Port) { - std::unique_ptr<zx::port> ExceptionPort(Port); - zx_port_packet_t Packet; - ExceptionPort->wait(ZX_TIME_INFINITE, &Packet, 0); - // Unbind as soon as possible so we don't receive exceptions from this thread. - if (zx_task_bind_exception_port(ZX_HANDLE_INVALID, ZX_HANDLE_INVALID, - kFuzzingCrash, 0) != ZX_OK) { - // Shouldn't happen; if it does the safest option is to just exit. - Printf("libFuzzer: unable to unbind exception port; aborting!\n"); - exit(1); - } - if (Packet.key != kFuzzingCrash) { - Printf("libFuzzer: invalid crash key: %" PRIx64 "; aborting!\n", - Packet.key); - exit(1); - } - // CrashCallback should not return from this call +// For the crash handler, we need to call Fuzzer::StaticCrashSignalCallback +// without POSIX signal handlers. To achieve this, we use an assembly function +// to add the necessary CFI unwinding information and a C function to bridge +// from that back into C++. + +// FIXME: This works as a short-term solution, but this code really shouldn't be +// architecture dependent. A better long term solution is to implement remote +// unwinding and expose the necessary APIs through sanitizer_common and/or ASAN +// to allow the exception handling thread to gather the crash state directly. 
+// +// Alternatively, Fuchsia may in future actually implement basic signal +// handling for the machine trap signals. +#if defined(__x86_64__) +#define FOREACH_REGISTER(OP_REG, OP_NUM) \ + OP_REG(rax) \ + OP_REG(rbx) \ + OP_REG(rcx) \ + OP_REG(rdx) \ + OP_REG(rsi) \ + OP_REG(rdi) \ + OP_REG(rbp) \ + OP_REG(rsp) \ + OP_REG(r8) \ + OP_REG(r9) \ + OP_REG(r10) \ + OP_REG(r11) \ + OP_REG(r12) \ + OP_REG(r13) \ + OP_REG(r14) \ + OP_REG(r15) \ + OP_REG(rip) + +#elif defined(__aarch64__) +#define FOREACH_REGISTER(OP_REG, OP_NUM) \ + OP_NUM(0) \ + OP_NUM(1) \ + OP_NUM(2) \ + OP_NUM(3) \ + OP_NUM(4) \ + OP_NUM(5) \ + OP_NUM(6) \ + OP_NUM(7) \ + OP_NUM(8) \ + OP_NUM(9) \ + OP_NUM(10) \ + OP_NUM(11) \ + OP_NUM(12) \ + OP_NUM(13) \ + OP_NUM(14) \ + OP_NUM(15) \ + OP_NUM(16) \ + OP_NUM(17) \ + OP_NUM(18) \ + OP_NUM(19) \ + OP_NUM(20) \ + OP_NUM(21) \ + OP_NUM(22) \ + OP_NUM(23) \ + OP_NUM(24) \ + OP_NUM(25) \ + OP_NUM(26) \ + OP_NUM(27) \ + OP_NUM(28) \ + OP_NUM(29) \ + OP_NUM(30) \ + OP_REG(sp) + +#else +#error "Unsupported architecture for fuzzing on Fuchsia" +#endif + +// Produces a CFI directive for the named or numbered register. +#define CFI_OFFSET_REG(reg) ".cfi_offset " #reg ", %c[" #reg "]\n" +#define CFI_OFFSET_NUM(num) CFI_OFFSET_REG(r##num) + +// Produces an assembler input operand for the named or numbered register. +#define ASM_OPERAND_REG(reg) \ + [reg] "i"(offsetof(zx_thread_state_general_regs_t, reg)), +#define ASM_OPERAND_NUM(num) \ + [r##num] "i"(offsetof(zx_thread_state_general_regs_t, r[num])), + +// Trampoline to bridge from the assembly below to the static C++ crash +// callback. +__attribute__((noreturn)) +static void StaticCrashHandler() { Fuzzer::StaticCrashSignalCallback(); + for (;;) { + _Exit(1); + } +} + +// Creates the trampoline with the necessary CFI information to unwind through +// to the crashing call stack. 
The attribute is necessary because the function +// is never called; it's just a container around the assembly to allow it to +// use operands for compile-time computed constants. +__attribute__((used)) +void MakeTrampoline() { + __asm__(".cfi_endproc\n" + ".pushsection .text.CrashTrampolineAsm\n" + ".type CrashTrampolineAsm,STT_FUNC\n" +"CrashTrampolineAsm:\n" + ".cfi_startproc simple\n" + ".cfi_signal_frame\n" +#if defined(__x86_64__) + ".cfi_return_column rip\n" + ".cfi_def_cfa rsp, 0\n" + FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM) + "call %c[StaticCrashHandler]\n" + "ud2\n" +#elif defined(__aarch64__) + ".cfi_return_column 33\n" + ".cfi_def_cfa sp, 0\n" + ".cfi_offset 33, %c[pc]\n" + FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM) + "bl %[StaticCrashHandler]\n" +#else +#error "Unsupported architecture for fuzzing on Fuchsia" +#endif + ".cfi_endproc\n" + ".size CrashTrampolineAsm, . - CrashTrampolineAsm\n" + ".popsection\n" + ".cfi_startproc\n" + : // No outputs + : FOREACH_REGISTER(ASM_OPERAND_REG, ASM_OPERAND_NUM) +#if defined(__aarch64__) + ASM_OPERAND_REG(pc) +#endif + [StaticCrashHandler] "i" (StaticCrashHandler)); +} + +void CrashHandler(zx_handle_t *Event) { + // This structure is used to ensure we close handles to objects we create in + // this handler. + struct ScopedHandle { + ~ScopedHandle() { _zx_handle_close(Handle); } + zx_handle_t Handle = ZX_HANDLE_INVALID; + }; + + // Create and bind the exception port. We need to claim to be a "debugger" so + // the kernel will allow us to modify and resume dying threads (see below). + // Once the port is set, we can signal the main thread to continue and wait + // for the exception to arrive. 
+ ScopedHandle Port; + ExitOnErr(_zx_port_create(0, &Port.Handle), "_zx_port_create"); + zx_handle_t Self = _zx_process_self(); + + ExitOnErr(_zx_task_bind_exception_port(Self, Port.Handle, kFuzzingCrash, + ZX_EXCEPTION_PORT_DEBUGGER), + "_zx_task_bind_exception_port"); + + ExitOnErr(_zx_object_signal(*Event, 0, ZX_USER_SIGNAL_0), + "_zx_object_signal"); + + zx_port_packet_t Packet; + ExitOnErr(_zx_port_wait(Port.Handle, ZX_TIME_INFINITE_OLD, &Packet), + "_zx_port_wait"); + + // At this point, we want to get the state of the crashing thread, but + // libFuzzer and the sanitizers assume this will happen from that same thread + // via a POSIX signal handler. "Resurrecting" the thread in the middle of the + // appropriate callback is as simple as forcibly setting the instruction + // pointer/program counter, provided we NEVER EVER return from that function + // (since otherwise our stack will not be valid). + ScopedHandle Thread; + ExitOnErr(_zx_object_get_child(Self, Packet.exception.tid, + ZX_RIGHT_SAME_RIGHTS, &Thread.Handle), + "_zx_object_get_child"); + + zx_thread_state_general_regs_t GeneralRegisters; + ExitOnErr(_zx_thread_read_state(Thread.Handle, ZX_THREAD_STATE_GENERAL_REGS, + &GeneralRegisters, sizeof(GeneralRegisters)), + "_zx_thread_read_state"); + + // To unwind properly, we need to push the crashing thread's register state + // onto the stack and jump into a trampoline with CFI instructions on how + // to restore it. 
+#if defined(__x86_64__) + uintptr_t StackPtr = + (GeneralRegisters.rsp - (128 + sizeof(GeneralRegisters))) & + -(uintptr_t)16; + __unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters, + sizeof(GeneralRegisters)); + GeneralRegisters.rsp = StackPtr; + GeneralRegisters.rip = reinterpret_cast<zx_vaddr_t>(CrashTrampolineAsm); + +#elif defined(__aarch64__) + uintptr_t StackPtr = + (GeneralRegisters.sp - sizeof(GeneralRegisters)) & -(uintptr_t)16; + __unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters, + sizeof(GeneralRegisters)); + GeneralRegisters.sp = StackPtr; + GeneralRegisters.pc = reinterpret_cast<zx_vaddr_t>(CrashTrampolineAsm); + +#else +#error "Unsupported architecture for fuzzing on Fuchsia" +#endif + + // Now force the crashing thread's state. + ExitOnErr(_zx_thread_write_state(Thread.Handle, ZX_THREAD_STATE_GENERAL_REGS, + &GeneralRegisters, sizeof(GeneralRegisters)), + "_zx_thread_write_state"); + + ExitOnErr(_zx_task_resume_from_exception(Thread.Handle, Port.Handle, 0), + "_zx_task_resume_from_exception"); } } // namespace // Platform specific functions. void SetSignalHandler(const FuzzingOptions &Options) { - zx_status_t rc; - // Set up alarm handler if needed. if (Options.UnitTimeoutSec > 0) { std::thread T(AlarmHandler, Options.UnitTimeoutSec / 2 + 1); @@ -96,37 +310,30 @@ void SetSignalHandler(const FuzzingOptions &Options) { !Options.HandleFpe && !Options.HandleAbrt) return; - // Create an exception port - zx::port *ExceptionPort = new zx::port(); - if ((rc = zx::port::create(0, ExceptionPort)) != ZX_OK) { - Printf("libFuzzer: zx_port_create failed: %s\n", zx_status_get_string(rc)); - exit(1); - } + // Set up the crash handler and wait until it is ready before proceeding. 
+ zx_handle_t Event; + ExitOnErr(_zx_event_create(0, &Event), "_zx_event_create"); - // Bind the port to receive exceptions from our process - if ((rc = zx_task_bind_exception_port(zx_process_self(), ExceptionPort->get(), - kFuzzingCrash, 0)) != ZX_OK) { - Printf("libFuzzer: unable to bind exception port: %s\n", - zx_status_get_string(rc)); - exit(1); - } + std::thread T(CrashHandler, &Event); + zx_status_t Status = _zx_object_wait_one(Event, ZX_USER_SIGNAL_0, + ZX_TIME_INFINITE_OLD, nullptr); + _zx_handle_close(Event); + ExitOnErr(Status, "_zx_object_wait_one"); - // Set up the crash handler. - std::thread T(CrashHandler, ExceptionPort); T.detach(); } void SleepSeconds(int Seconds) { - zx::nanosleep(zx::deadline_after(ZX_SEC(Seconds))); + _zx_nanosleep(_zx_deadline_after(ZX_SEC(Seconds))); } unsigned long GetPid() { zx_status_t rc; zx_info_handle_basic_t Info; - if ((rc = zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info, - sizeof(Info), NULL, NULL)) != ZX_OK) { + if ((rc = _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info, + sizeof(Info), NULL, NULL)) != ZX_OK) { Printf("libFuzzer: unable to get info about self: %s\n", - zx_status_get_string(rc)); + _zx_status_get_string(rc)); exit(1); } return Info.koid; @@ -135,15 +342,30 @@ unsigned long GetPid() { size_t GetPeakRSSMb() { zx_status_t rc; zx_info_task_stats_t Info; - if ((rc = zx_object_get_info(zx_process_self(), ZX_INFO_TASK_STATS, &Info, - sizeof(Info), NULL, NULL)) != ZX_OK) { + if ((rc = _zx_object_get_info(_zx_process_self(), ZX_INFO_TASK_STATS, &Info, + sizeof(Info), NULL, NULL)) != ZX_OK) { Printf("libFuzzer: unable to get info about self: %s\n", - zx_status_get_string(rc)); + _zx_status_get_string(rc)); exit(1); } return (Info.mem_private_bytes + Info.mem_shared_bytes) >> 20; } +template <typename Fn> +class RunOnDestruction { + public: + explicit RunOnDestruction(Fn fn) : fn_(fn) {} + ~RunOnDestruction() { fn_(); } + + private: + Fn fn_; +}; + +template <typename Fn> 
+RunOnDestruction<Fn> at_scope_exit(Fn fn) { + return RunOnDestruction<Fn>(fn); +} + int ExecuteCommand(const Command &Cmd) { zx_status_t rc; @@ -151,30 +373,24 @@ int ExecuteCommand(const Command &Cmd) { auto Args = Cmd.getArguments(); size_t Argc = Args.size(); assert(Argc != 0); - std::unique_ptr<const char *[]> Argv(new const char *[Argc]); + std::unique_ptr<const char *[]> Argv(new const char *[Argc + 1]); for (size_t i = 0; i < Argc; ++i) Argv[i] = Args[i].c_str(); - - // Create the basic launchpad. Clone everything except stdio. - launchpad_t *lp; - launchpad_create(ZX_HANDLE_INVALID, Argv[0], &lp); - launchpad_load_from_file(lp, Argv[0]); - launchpad_set_args(lp, Argc, Argv.get()); - launchpad_clone(lp, LP_CLONE_ALL & (~LP_CLONE_FDIO_STDIO)); + Argv[Argc] = nullptr; // Determine stdout int FdOut = STDOUT_FILENO; - fbl::unique_fd OutputFile; + if (Cmd.hasOutputFile()) { auto Filename = Cmd.getOutputFile(); - OutputFile.reset(open(Filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0)); - if (!OutputFile) { + FdOut = open(Filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0); + if (FdOut == -1) { Printf("libFuzzer: failed to open %s: %s\n", Filename.c_str(), strerror(errno)); return ZX_ERR_IO; } - FdOut = OutputFile.get(); } + auto CloseFdOut = at_scope_exit([&]() { close(FdOut); } ); // Determine stderr int FdErr = STDERR_FILENO; @@ -182,36 +398,59 @@ int ExecuteCommand(const Command &Cmd) { FdErr = FdOut; // Clone the file descriptors into the new process - if ((rc = launchpad_clone_fd(lp, STDIN_FILENO, STDIN_FILENO)) != ZX_OK || - (rc = launchpad_clone_fd(lp, FdOut, STDOUT_FILENO)) != ZX_OK || - (rc = launchpad_clone_fd(lp, FdErr, STDERR_FILENO)) != ZX_OK) { - Printf("libFuzzer: failed to clone FDIO: %s\n", zx_status_get_string(rc)); - return rc; - } - - // Start the process + fdio_spawn_action_t SpawnAction[] = { + { + .action = FDIO_SPAWN_ACTION_CLONE_FD, + .fd = + { + .local_fd = STDIN_FILENO, + .target_fd = STDIN_FILENO, + }, + }, + { + .action = 
FDIO_SPAWN_ACTION_CLONE_FD, + .fd = + { + .local_fd = FdOut, + .target_fd = STDOUT_FILENO, + }, + }, + { + .action = FDIO_SPAWN_ACTION_CLONE_FD, + .fd = + { + .local_fd = FdErr, + .target_fd = STDERR_FILENO, + }, + }, + }; + + // Start the process. + char ErrorMsg[FDIO_SPAWN_ERR_MSG_MAX_LENGTH]; zx_handle_t ProcessHandle = ZX_HANDLE_INVALID; - const char *ErrorMsg = nullptr; - if ((rc = launchpad_go(lp, &ProcessHandle, &ErrorMsg)) != ZX_OK) { + rc = fdio_spawn_etc( + ZX_HANDLE_INVALID, FDIO_SPAWN_CLONE_ALL & (~FDIO_SPAWN_CLONE_STDIO), + Argv[0], Argv.get(), nullptr, 3, SpawnAction, &ProcessHandle, ErrorMsg); + if (rc != ZX_OK) { Printf("libFuzzer: failed to launch '%s': %s, %s\n", Argv[0], ErrorMsg, - zx_status_get_string(rc)); + _zx_status_get_string(rc)); return rc; } - zx::process Process(ProcessHandle); + auto CloseHandle = at_scope_exit([&]() { _zx_handle_close(ProcessHandle); }); // Now join the process and return the exit status. - if ((rc = Process.wait_one(ZX_PROCESS_TERMINATED, ZX_TIME_INFINITE, - nullptr)) != ZX_OK) { + if ((rc = _zx_object_wait_one(ProcessHandle, ZX_PROCESS_TERMINATED, + ZX_TIME_INFINITE_OLD, nullptr)) != ZX_OK) { Printf("libFuzzer: failed to join '%s': %s\n", Argv[0], - zx_status_get_string(rc)); + _zx_status_get_string(rc)); return rc; } zx_info_process_t Info; - if ((rc = Process.get_info(ZX_INFO_PROCESS, &Info, sizeof(Info), nullptr, - nullptr)) != ZX_OK) { + if ((rc = _zx_object_get_info(ProcessHandle, ZX_INFO_PROCESS, &Info, + sizeof(Info), nullptr, nullptr)) != ZX_OK) { Printf("libFuzzer: unable to get return code from '%s': %s\n", Argv[0], - zx_status_get_string(rc)); + _zx_status_get_string(rc)); return rc; } diff --git a/lib/fuzzer/FuzzerUtilLinux.cpp b/lib/fuzzer/FuzzerUtilLinux.cpp index c7cf2c0a778b..c103fd230b0c 100644 --- a/lib/fuzzer/FuzzerUtilLinux.cpp +++ b/lib/fuzzer/FuzzerUtilLinux.cpp @@ -9,7 +9,8 @@ // Misc utils for Linux. 
//===----------------------------------------------------------------------===// #include "FuzzerDefs.h" -#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD +#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \ + LIBFUZZER_OPENBSD #include "FuzzerCommand.h" #include <stdlib.h> @@ -23,4 +24,4 @@ int ExecuteCommand(const Command &Cmd) { } // namespace fuzzer -#endif // LIBFUZZER_LINUX || LIBFUZZER_NETBSD +#endif diff --git a/lib/fuzzer/FuzzerUtilPosix.cpp b/lib/fuzzer/FuzzerUtilPosix.cpp index 934b7aa98ff1..bc64d3293702 100644 --- a/lib/fuzzer/FuzzerUtilPosix.cpp +++ b/lib/fuzzer/FuzzerUtilPosix.cpp @@ -118,7 +118,8 @@ size_t GetPeakRSSMb() { struct rusage usage; if (getrusage(RUSAGE_SELF, &usage)) return 0; - if (LIBFUZZER_LINUX) { + if (LIBFUZZER_LINUX || LIBFUZZER_FREEBSD || LIBFUZZER_NETBSD || + LIBFUZZER_OPENBSD) { // ru_maxrss is in KiB return usage.ru_maxrss >> 10; } else if (LIBFUZZER_APPLE) { diff --git a/lib/fuzzer/afl/afl_driver.cpp b/lib/fuzzer/afl/afl_driver.cpp index bbe5be795ed4..fa494c03bde0 100644 --- a/lib/fuzzer/afl/afl_driver.cpp +++ b/lib/fuzzer/afl/afl_driver.cpp @@ -69,20 +69,38 @@ statistics from the file. If that fails then the process will quit. 
#define LIBFUZZER_LINUX 1 #define LIBFUZZER_APPLE 0 #define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 #elif __APPLE__ #define LIBFUZZER_LINUX 0 #define LIBFUZZER_APPLE 1 #define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 #elif __NetBSD__ #define LIBFUZZER_LINUX 0 #define LIBFUZZER_APPLE 0 #define LIBFUZZER_NETBSD 1 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 0 +#elif __FreeBSD__ +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 1 +#define LIBFUZZER_OPENBSD 0 +#elif __OpenBSD__ +#define LIBFUZZER_LINUX 0 +#define LIBFUZZER_APPLE 0 +#define LIBFUZZER_NETBSD 0 +#define LIBFUZZER_FREEBSD 0 +#define LIBFUZZER_OPENBSD 1 #else #error "Support for your platform has not been implemented" #endif // Used to avoid repeating error checking boilerplate. If cond is false, a -// fatal error has occured in the program. In this event print error_message +// fatal error has occurred in the program. In this event print error_message // to stderr and abort(). Otherwise do nothing. Note that setting // AFL_DRIVER_STDERR_DUPLICATE_FILENAME may cause error_message to be appended // to the file as well, if the error occurs after the duplication is performed. @@ -120,12 +138,24 @@ static const int kNumExtraStats = 2; static const char *kExtraStatsFormatString = "peak_rss_mb : %u\n" "slowest_unit_time_sec : %u\n"; +// Experimental feature to use afl_driver without AFL's deferred mode. +// Needs to run before __afl_auto_init. +__attribute__((constructor(0))) void __decide_deferred_forkserver(void) { + if (getenv("AFL_DRIVER_DONT_DEFER")) { + if (unsetenv("__AFL_DEFER_FORKSRV")) { + perror("Failed to unset __AFL_DEFER_FORKSRV"); + abort(); + } + } +} + // Copied from FuzzerUtil.cpp. 
size_t GetPeakRSSMb() { struct rusage usage; if (getrusage(RUSAGE_SELF, &usage)) return 0; - if (LIBFUZZER_LINUX || LIBFUZZER_NETBSD) { + if (LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || + LIBFUZZER_OPENBSD) { // ru_maxrss is in KiB return usage.ru_maxrss >> 10; } else if (LIBFUZZER_APPLE) { @@ -270,7 +300,7 @@ int ExecuteFilesOnyByOne(int argc, char **argv) { assert(in); LLVMFuzzerTestOneInput(reinterpret_cast<const uint8_t *>(bytes.data()), bytes.size()); - std::cout << "Execution successfull" << std::endl; + std::cout << "Execution successful" << std::endl; } return 0; } @@ -296,7 +326,8 @@ int main(int argc, char **argv) { maybe_duplicate_stderr(); maybe_initialize_extra_stats(); - __afl_manual_init(); + if (!getenv("AFL_DRIVER_DONT_DEFER")) + __afl_manual_init(); int N = 1000; if (argc == 2 && argv[1][0] == '-') diff --git a/lib/fuzzer/build.sh b/lib/fuzzer/build.sh index 4556af5daf7d..504e54e3a819 100755 --- a/lib/fuzzer/build.sh +++ b/lib/fuzzer/build.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/sh LIBFUZZER_SRC_DIR=$(dirname $0) CXX="${CXX:-clang}" for f in $LIBFUZZER_SRC_DIR/*.cpp; do diff --git a/lib/fuzzer/dataflow/DataFlow.cpp b/lib/fuzzer/dataflow/DataFlow.cpp new file mode 100644 index 000000000000..a79c796ac456 --- /dev/null +++ b/lib/fuzzer/dataflow/DataFlow.cpp @@ -0,0 +1,217 @@ +/*===- DataFlow.cpp - a standalone DataFlow tracer -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// An experimental data-flow tracer for fuzz targets. +// It is based on DFSan and SanitizerCoverage. 
+// https://clang.llvm.org/docs/DataFlowSanitizer.html +// https://clang.llvm.org/docs/SanitizerCoverage.html#tracing-data-flow +// +// It executes the fuzz target on the given input while monitoring the +// data flow for every instrumented comparison instruction. +// +// The output shows which functions depend on which bytes of the input. +// +// Build: +// 1. Compile this file with -fsanitize=dataflow +// 2. Build the fuzz target with -g -fsanitize=dataflow +// -fsanitize-coverage=trace-pc-guard,pc-table,func,trace-cmp +// 3. Link those together with -fsanitize=dataflow +// +// -fsanitize-coverage=trace-cmp inserts callbacks around every comparison +// instruction, DFSan modifies the calls to pass the data flow labels. +// The callbacks update the data flow label for the current function. +// See e.g. __dfsw___sanitizer_cov_trace_cmp1 below. +// +// -fsanitize-coverage=trace-pc-guard,pc-table,func instruments function +// entries so that the comparison callback knows that current function. +// +// +// Run: +// # Collect data flow for INPUT_FILE, write to OUTPUT_FILE (default: stdout) +// ./a.out INPUT_FILE [OUTPUT_FILE] +// +// # Print all instrumented functions. llvm-symbolizer must be present in PATH +// ./a.out +// +// Example output: +// =============== +// F0 11111111111111 +// F1 10000000000000 +// =============== +// "FN xxxxxxxxxx": tells what bytes of the input does the function N depend on. +// The byte string is LEN+1 bytes. The last byte is set if the function +// depends on the input length. 
+//===----------------------------------------------------------------------===*/ + +#include <assert.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> + +#include <execinfo.h> // backtrace_symbols_fd + +#include <sanitizer/dfsan_interface.h> + +extern "C" { +extern int LLVMFuzzerTestOneInput(const unsigned char *Data, size_t Size); +__attribute__((weak)) extern int LLVMFuzzerInitialize(int *argc, char ***argv); +} // extern "C" + +static size_t InputLen; +static size_t NumFuncs; +static const uintptr_t *FuncsBeg; +static __thread size_t CurrentFunc; +static dfsan_label *FuncLabels; // Array of NumFuncs elements. +static char *PrintableStringForLabel; // InputLen + 2 bytes. +static bool LabelSeen[1 << 8 * sizeof(dfsan_label)]; + +// Prints all instrumented functions. +static int PrintFunctions() { + // We don't have the symbolizer integrated with dfsan yet. + // So use backtrace_symbols_fd and pipe it through llvm-symbolizer. + // TODO(kcc): this is pretty ugly and may break in lots of ways. + // We'll need to make a proper in-process symbolizer work with DFSan. 
+ FILE *Pipe = popen("sed 's/(+/ /g; s/).*//g' " + "| llvm-symbolizer " + "| grep 'dfs\\$' " + "| sed 's/dfs\\$//g'", "w"); + for (size_t I = 0; I < NumFuncs; I++) { + uintptr_t PC = FuncsBeg[I * 2]; + void *const Buf[1] = {(void*)PC}; + backtrace_symbols_fd(Buf, 1, fileno(Pipe)); + } + pclose(Pipe); + return 0; +} + +extern "C" +void SetBytesForLabel(dfsan_label L, char *Bytes) { + if (LabelSeen[L]) + return; + LabelSeen[L] = true; + assert(L); + if (L <= InputLen + 1) { + Bytes[L - 1] = '1'; + } else { + auto *DLI = dfsan_get_label_info(L); + SetBytesForLabel(DLI->l1, Bytes); + SetBytesForLabel(DLI->l2, Bytes); + } +} + +static char *GetPrintableStringForLabel(dfsan_label L) { + memset(PrintableStringForLabel, '0', InputLen + 1); + PrintableStringForLabel[InputLen + 1] = 0; + memset(LabelSeen, 0, sizeof(LabelSeen)); + SetBytesForLabel(L, PrintableStringForLabel); + return PrintableStringForLabel; +} + +static void PrintDataFlow(FILE *Out) { + for (size_t I = 0; I < NumFuncs; I++) + if (FuncLabels[I]) + fprintf(Out, "F%zd %s\n", I, GetPrintableStringForLabel(FuncLabels[I])); +} + +int main(int argc, char **argv) { + if (LLVMFuzzerInitialize) + LLVMFuzzerInitialize(&argc, &argv); + if (argc == 1) + return PrintFunctions(); + assert(argc == 4 || argc == 5); + size_t Beg = atoi(argv[1]); + size_t End = atoi(argv[2]); + assert(Beg < End); + + const char *Input = argv[3]; + fprintf(stderr, "INFO: reading '%s'\n", Input); + FILE *In = fopen(Input, "r"); + assert(In); + fseek(In, 0, SEEK_END); + InputLen = ftell(In); + fseek(In, 0, SEEK_SET); + unsigned char *Buf = (unsigned char*)malloc(InputLen); + size_t NumBytesRead = fread(Buf, 1, InputLen, In); + assert(NumBytesRead == InputLen); + PrintableStringForLabel = (char*)malloc(InputLen + 2); + fclose(In); + + fprintf(stderr, "INFO: running '%s'\n", Input); + for (size_t I = 1; I <= InputLen; I++) { + dfsan_label L = dfsan_create_label("", nullptr); + assert(L == I); + size_t Idx = I - 1; + if (Idx >= Beg && Idx < End) + 
dfsan_set_label(L, Buf + Idx, 1); + } + dfsan_label SizeL = dfsan_create_label("", nullptr); + assert(SizeL == InputLen + 1); + dfsan_set_label(SizeL, &InputLen, sizeof(InputLen)); + + LLVMFuzzerTestOneInput(Buf, InputLen); + free(Buf); + + bool OutIsStdout = argc == 4; + fprintf(stderr, "INFO: writing dataflow to %s\n", + OutIsStdout ? "<stdout>" : argv[4]); + FILE *Out = OutIsStdout ? stdout : fopen(argv[4], "w"); + PrintDataFlow(Out); + if (!OutIsStdout) fclose(Out); +} + +extern "C" { + +void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, + uint32_t *stop) { + assert(NumFuncs == 0 && "This tool does not support DSOs"); + assert(start < stop && "The code is not instrumented for coverage"); + if (start == stop || *start) return; // Initialize only once. + for (uint32_t *x = start; x < stop; x++) + *x = ++NumFuncs; // The first index is 1. + FuncLabels = (dfsan_label*)calloc(NumFuncs, sizeof(dfsan_label)); + fprintf(stderr, "INFO: %zd instrumented function(s) observed\n", NumFuncs); +} + +void __sanitizer_cov_pcs_init(const uintptr_t *pcs_beg, + const uintptr_t *pcs_end) { + assert(NumFuncs == (pcs_end - pcs_beg) / 2); + FuncsBeg = pcs_beg; +} + +void __sanitizer_cov_trace_pc_indir(uint64_t x){} // unused. + +void __sanitizer_cov_trace_pc_guard(uint32_t *guard){ + uint32_t FuncNum = *guard - 1; // Guards start from 1. 
+ assert(FuncNum < NumFuncs); + CurrentFunc = FuncNum; +} + +void __dfsw___sanitizer_cov_trace_switch(uint64_t Val, uint64_t *Cases, + dfsan_label L1, dfsan_label UnusedL) { + assert(CurrentFunc < NumFuncs); + FuncLabels[CurrentFunc] = dfsan_union(FuncLabels[CurrentFunc], L1); +} + +#define HOOK(Name, Type) \ + void Name(Type Arg1, Type Arg2, dfsan_label L1, dfsan_label L2) { \ + assert(CurrentFunc < NumFuncs); \ + FuncLabels[CurrentFunc] = \ + dfsan_union(FuncLabels[CurrentFunc], dfsan_union(L1, L2)); \ + } + +HOOK(__dfsw___sanitizer_cov_trace_const_cmp1, uint8_t) +HOOK(__dfsw___sanitizer_cov_trace_const_cmp2, uint16_t) +HOOK(__dfsw___sanitizer_cov_trace_const_cmp4, uint32_t) +HOOK(__dfsw___sanitizer_cov_trace_const_cmp8, uint64_t) +HOOK(__dfsw___sanitizer_cov_trace_cmp1, uint8_t) +HOOK(__dfsw___sanitizer_cov_trace_cmp2, uint16_t) +HOOK(__dfsw___sanitizer_cov_trace_cmp4, uint32_t) +HOOK(__dfsw___sanitizer_cov_trace_cmp8, uint64_t) + +} // extern "C" diff --git a/lib/fuzzer/scripts/collect_data_flow.py b/lib/fuzzer/scripts/collect_data_flow.py new file mode 100755 index 000000000000..3edff66bb9d1 --- /dev/null +++ b/lib/fuzzer/scripts/collect_data_flow.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +#===- lib/fuzzer/scripts/collect_data_flow.py ------------------------------===# +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +#===------------------------------------------------------------------------===# +# Runs the data-flow tracer several times on the same input in order to collect +# the complete trace for all input bytes (running it on all bytes at once +# may fail if DFSan runs out of labels). 
+# Usage: +# +# # Collect dataflow for one input, store it in OUTPUT (default is stdout) +# collect_data_flow.py BINARY INPUT [OUTPUT] +# +# # Collect dataflow for all inputs in CORPUS_DIR, store them in OUTPUT_DIR +# collect_data_flow.py BINARY CORPUS_DIR OUTPUT_DIR +#===------------------------------------------------------------------------===# +import atexit +import hashlib +import sys +import os +import subprocess +import tempfile +import shutil + +tmpdir = "" + +def cleanup(d): + print("removing: %s" % d) + shutil.rmtree(d) + +def collect_dataflow_for_corpus(self, exe, corpus_dir, output_dir): + print("Collecting dataflow for corpus: %s output_dir: %s" % (corpus_dir, + output_dir)) + assert not os.path.exists(output_dir) + os.mkdir(output_dir) + for root, dirs, files in os.walk(corpus_dir): + for f in files: + path = os.path.join(root, f) + sha1 = hashlib.sha1(open(path).read()).hexdigest() + output = os.path.join(output_dir, sha1) + subprocess.call([self, exe, path, output]) + functions_txt = open(os.path.join(output_dir, "functions.txt"), "w") + subprocess.call([exe], stdout=functions_txt) + + +def main(argv): + exe = argv[1] + inp = argv[2] + if os.path.isdir(inp): + return collect_dataflow_for_corpus(argv[0], exe, inp, argv[3]) + size = os.path.getsize(inp) + q = [[0, size]] + tmpdir = tempfile.mkdtemp(prefix="libfuzzer-tmp-") + atexit.register(cleanup, tmpdir) + print "tmpdir: ", tmpdir + outputs = [] + while len(q): + r = q.pop() + print "******* Trying: ", r + tmpfile = os.path.join(tmpdir, str(r[0]) + "-" + str(r[1])) + ret = subprocess.call([exe, str(r[0]), str(r[1]), inp, tmpfile]) + if ret and r[1] - r[0] >= 2: + q.append([r[0], (r[1] + r[0]) / 2]) + q.append([(r[1] + r[0]) / 2, r[1]]) + else: + outputs.append(tmpfile) + print "******* Success: ", r + f = sys.stdout + if len(argv) >= 4: + f = open(argv[3], "w") + merge = os.path.join(os.path.dirname(argv[0]), "merge_data_flow.py") + subprocess.call([merge] + outputs, stdout=f) + +if __name__ == 
'__main__': + main(sys.argv) diff --git a/lib/fuzzer/scripts/merge_data_flow.py b/lib/fuzzer/scripts/merge_data_flow.py new file mode 100755 index 000000000000..d2f5081e7b8c --- /dev/null +++ b/lib/fuzzer/scripts/merge_data_flow.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +#===- lib/fuzzer/scripts/merge_data_flow.py ------------------------------===# +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +#===------------------------------------------------------------------------===# +# Merge several data flow traces into one. +# Usage: +# merge_data_flow.py trace1 trace2 ... > result +#===------------------------------------------------------------------------===# +import sys +import fileinput +from array import array + +def Merge(a, b): + res = array('b') + for i in range(0, len(a)): + res.append(ord('1' if a[i] == '1' or b[i] == '1' else '0')) + return res.tostring() + +def main(argv): + D = {} + for line in fileinput.input(): + [F,BV] = line.strip().split(' ') + if F in D: + D[F] = Merge(D[F], BV) + else: + D[F] = BV; + for F in D.keys(): + print("%s %s" % (F, D[F])) + +if __name__ == '__main__': + main(sys.argv) diff --git a/lib/fuzzer/scripts/unbalanced_allocs.py b/lib/fuzzer/scripts/unbalanced_allocs.py index a4ce187679d7..74478ad55af0 100755 --- a/lib/fuzzer/scripts/unbalanced_allocs.py +++ b/lib/fuzzer/scripts/unbalanced_allocs.py @@ -24,9 +24,9 @@ def PrintStack(line, stack): global _skip if _skip > 0: return - print 'Unbalanced ' + line.rstrip(); + print('Unbalanced ' + line.rstrip()); for l in stack: - print l.rstrip() + print(l.rstrip()) def ProcessStack(line, f): stack = [] @@ -63,15 +63,15 @@ def ProcessRun(line, f): return ProcessMalloc(line, f, {}) allocs = {} - print line.rstrip() + print(line.rstrip()) line = f.readline() while line: if line.startswith('MallocFreeTracer: STOP'): global _skip _skip = _skip - 1 - for _, (l, s) in 
allocs.iteritems(): + for _, (l, s) in allocs.items(): PrintStack(l, s) - print line.rstrip() + print(line.rstrip()) return f.readline() line = ProcessMalloc(line, f, allocs) return line diff --git a/lib/fuzzer/tests/CMakeLists.txt b/lib/fuzzer/tests/CMakeLists.txt index dac8773597e8..ed5807168301 100644 --- a/lib/fuzzer/tests/CMakeLists.txt +++ b/lib/fuzzer/tests/CMakeLists.txt @@ -3,22 +3,32 @@ set(LIBFUZZER_UNITTEST_CFLAGS ${COMPILER_RT_GTEST_CFLAGS} -I${COMPILER_RT_SOURCE_DIR}/lib/fuzzer -fno-rtti - -Werror -O2) +if (APPLE) + set(FUZZER_SUPPORTED_OS osx) +endif() + add_custom_target(FuzzerUnitTests) set_target_properties(FuzzerUnitTests PROPERTIES FOLDER "Compiler-RT Tests") set(LIBFUZZER_UNITTEST_LINK_FLAGS ${COMPILER_RT_UNITTEST_LINK_FLAGS}) list(APPEND LIBFUZZER_UNITTEST_LINK_FLAGS --driver-mode=g++) -if(APPLE) - list(APPEND LIBFUZZER_UNITTEST_LINK_FLAGS -lc++) +if(APPLE OR CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") + list(APPEND LIBFUZZER_UNITTEST_LINK_FLAGS -lc++ -lpthread) else() list(APPEND LIBFUZZER_UNITTEST_LINK_FLAGS -lstdc++ -lpthread) endif() -foreach(arch ${FUZZER_SUPPORTED_ARCH}) +if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux" AND COMPILER_RT_LIBCXX_PATH) + list(APPEND LIBFUZZER_UNITTEST_CFLAGS -nostdinc++ -D_LIBCPP_ABI_VERSION=Fuzzer) +endif() + +if(COMPILER_RT_DEFAULT_TARGET_ARCH IN_LIST FUZZER_SUPPORTED_ARCH) + # libFuzzer unit tests are only run on the host machine. 
+ set(arch ${COMPILER_RT_DEFAULT_TARGET_ARCH}) + set(LIBFUZZER_TEST_RUNTIME RTFuzzerTest.${arch}) if(APPLE) set(LIBFUZZER_TEST_RUNTIME_OBJECTS @@ -33,14 +43,20 @@ foreach(arch ${FUZZER_SUPPORTED_ARCH}) ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} FOLDER "Compiler-RT Runtime tests") + if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux" AND COMPILER_RT_LIBCXX_PATH) + set(LIBFUZZER_TEST_RUNTIME_DEPS libcxx_fuzzer_${arch}-build) + set(LIBFUZZER_TEST_RUNTIME_CFLAGS -isystem ${COMPILER_RT_LIBCXX_PATH}/include) + set(LIBFUZZER_TEST_RUNTIME_LINK_FLAGS ${LIBCXX_${arch}_PREFIX}/lib/libc++.a) + endif() + set(FuzzerTestObjects) generate_compiler_rt_tests(FuzzerTestObjects FuzzerUnitTests "Fuzzer-${arch}-Test" ${arch} SOURCES FuzzerUnittest.cpp ${COMPILER_RT_GTEST_SOURCE} RUNTIME ${LIBFUZZER_TEST_RUNTIME} - DEPS gtest - CFLAGS ${LIBFUZZER_UNITTEST_CFLAGS} - LINK_FLAGS ${LIBFUZZER_UNITTEST_LINK_FLAGS}) + DEPS gtest ${LIBFUZZER_TEST_RUNTIME_DEPS} + CFLAGS ${LIBFUZZER_UNITTEST_CFLAGS} ${LIBFUZZER_TEST_RUNTIME_CFLAGS} + LINK_FLAGS ${LIBFUZZER_UNITTEST_LINK_FLAGS} ${LIBFUZZER_TEST_RUNTIME_LINK_FLAGS}) set_target_properties(FuzzerUnitTests PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) -endforeach() +endif() diff --git a/lib/fuzzer/tests/FuzzerUnittest.cpp b/lib/fuzzer/tests/FuzzerUnittest.cpp index 97ec3b4bb6af..e3b06702603d 100644 --- a/lib/fuzzer/tests/FuzzerUnittest.cpp +++ b/lib/fuzzer/tests/FuzzerUnittest.cpp @@ -28,6 +28,14 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { abort(); } +TEST(Fuzzer, Basename) { + EXPECT_EQ(Basename("foo/bar"), "bar"); + EXPECT_EQ(Basename("bar"), "bar"); + EXPECT_EQ(Basename("/bar"), "bar"); + EXPECT_EQ(Basename("foo/x"), "x"); + EXPECT_EQ(Basename("foo/"), ""); +} + TEST(Fuzzer, CrossOver) { std::unique_ptr<ExternalFunctions> t(new ExternalFunctions()); fuzzer::EF = t.get(); @@ -328,7 +336,7 @@ void TestShuffleBytes(Mutator M, int NumIter) { } TEST(FuzzerMutate, ShuffleBytes1) { - 
TestShuffleBytes(&MutationDispatcher::Mutate_ShuffleBytes, 1 << 16); + TestShuffleBytes(&MutationDispatcher::Mutate_ShuffleBytes, 1 << 17); } TEST(FuzzerMutate, ShuffleBytes2) { TestShuffleBytes(&MutationDispatcher::Mutate, 1 << 20); @@ -381,6 +389,21 @@ TEST(FuzzerMutate, CopyPart1) { TEST(FuzzerMutate, CopyPart2) { TestCopyPart(&MutationDispatcher::Mutate, 1 << 13); } +TEST(FuzzerMutate, CopyPartNoInsertAtMaxSize) { + // This (non exhaustively) tests if `Mutate_CopyPart` tries to perform an + // insert on an input of size `MaxSize`. Performing an insert in this case + // will lead to the mutation failing. + std::unique_ptr<ExternalFunctions> t(new ExternalFunctions()); + fuzzer::EF = t.get(); + Random Rand(0); + std::unique_ptr<MutationDispatcher> MD(new MutationDispatcher(Rand, {})); + uint8_t Data[8] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x00, 0x11, 0x22}; + size_t MaxSize = sizeof(Data); + for (int count = 0; count < (1 << 18); ++count) { + size_t NewSize = MD->Mutate_CopyPart(Data, MaxSize, MaxSize); + ASSERT_EQ(NewSize, MaxSize); + } +} void TestAddWordFromDictionary(Mutator M, int NumIter) { std::unique_ptr<ExternalFunctions> t(new ExternalFunctions()); @@ -559,12 +582,14 @@ TEST(FuzzerUtil, Base64) { } TEST(Corpus, Distribution) { + DataFlowTrace DFT; Random Rand(0); std::unique_ptr<InputCorpus> C(new InputCorpus("")); size_t N = 10; size_t TriesPerUnit = 1<<16; for (size_t i = 0; i < N; i++) - C->AddToCorpus(Unit{ static_cast<uint8_t>(i) }, 1, false, {}); + C->AddToCorpus(Unit{static_cast<uint8_t>(i)}, 1, false, false, {}, DFT, + nullptr); Vector<size_t> Hist(N); for (size_t i = 0; i < N * TriesPerUnit; i++) { diff --git a/lib/hwasan/.clang-format b/lib/hwasan/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/hwasan/.clang-format +++ b/lib/hwasan/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/hwasan/CMakeLists.txt b/lib/hwasan/CMakeLists.txt index 3f3a6155590c..42bf4366f192 100644 --- 
a/lib/hwasan/CMakeLists.txt +++ b/lib/hwasan/CMakeLists.txt @@ -4,39 +4,38 @@ include_directories(..) set(HWASAN_RTL_SOURCES hwasan.cc hwasan_allocator.cc + hwasan_dynamic_shadow.cc hwasan_interceptors.cc hwasan_linux.cc + hwasan_poisoning.cc hwasan_report.cc hwasan_thread.cc - hwasan_poisoning.cc ) set(HWASAN_RTL_CXX_SOURCES hwasan_new_delete.cc) +set(HWASAN_RTL_HEADERS + hwasan.h + hwasan_allocator.h + hwasan_dynamic_shadow.h + hwasan_flags.h + hwasan_flags.inc + hwasan_interface_internal.h + hwasan_mapping.h + hwasan_poisoning.h + hwasan_report.h + hwasan_thread.h) + set(HWASAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS}) append_rtti_flag(OFF HWASAN_RTL_CFLAGS) -append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE HWASAN_RTL_CFLAGS) +append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC HWASAN_RTL_CFLAGS) # Prevent clang from generating libc calls. append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding HWASAN_RTL_CFLAGS) set(HWASAN_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS}) -if(ANDROID) -# On Android, -z global does not do what it is documented to do. -# On Android, -z global moves the library ahead in the lookup order, -# placing it right after the LD_PRELOADs. This is used to compensate for the fact -# that Android linker does not look at the dependencies of the main executable -# that aren't dependencies of the current DSO when resolving symbols from said DSO. -# As a net result, this allows running ASan executables without LD_PRELOAD-ing the -# ASan runtime library. -# The above is applicable to L MR1 or newer. 
- if (COMPILER_RT_HAS_Z_GLOBAL) - list(APPEND HWASAN_DYNAMIC_LINK_FLAGS -Wl,-z,global) - endif() -endif() - set(HWASAN_DYNAMIC_CFLAGS ${HWASAN_RTL_CFLAGS}) append_list_if(COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC -ftls-model=initial-exec HWASAN_DYNAMIC_CFLAGS) @@ -55,13 +54,18 @@ add_compiler_rt_component(hwasan) add_compiler_rt_object_libraries(RTHwasan ARCHS ${HWASAN_SUPPORTED_ARCH} - SOURCES ${HWASAN_RTL_SOURCES} CFLAGS ${HWASAN_RTL_CFLAGS}) + SOURCES ${HWASAN_RTL_SOURCES} + ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS} + CFLAGS ${HWASAN_RTL_CFLAGS}) add_compiler_rt_object_libraries(RTHwasan_cxx ARCHS ${HWASAN_SUPPORTED_ARCH} - SOURCES ${HWASAN_RTL_CXX_SOURCES} CFLAGS ${HWASAN_RTL_CFLAGS}) + SOURCES ${HWASAN_RTL_CXX_SOURCES} + ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS} + CFLAGS ${HWASAN_RTL_CFLAGS}) add_compiler_rt_object_libraries(RTHwasan_dynamic ARCHS ${HWASAN_SUPPORTED_ARCH} - SOURCES ${HWASAN_RTL_SOURCES} ${TSAN_RTL_CXX_SOURCES} + SOURCES ${HWASAN_RTL_SOURCES} ${HWASAN_RTL_CXX_SOURCES} + ADDITIONAL_HEADERS ${HWASAN_RTL_HEADERS} CFLAGS ${HWASAN_DYNAMIC_CFLAGS}) file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/dummy.cc "") @@ -78,7 +82,9 @@ foreach(arch ${HWASAN_SUPPORTED_ARCH}) RTInterception RTSanitizerCommon RTSanitizerCommonLibc - RTUbsan + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer + RTUbsan CFLAGS ${HWASAN_RTL_CFLAGS} PARENT_TARGET hwasan) add_compiler_rt_runtime(clang_rt.hwasan_cxx @@ -112,7 +118,9 @@ foreach(arch ${HWASAN_SUPPORTED_ARCH}) RTInterception RTSanitizerCommon RTSanitizerCommonLibc - RTUbsan + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer + RTUbsan # The only purpose of RTHWAsan_dynamic_version_script_dummy is to # carry a dependency of the shared runtime on the version script. 
# Replacing it with a straightforward @@ -126,7 +134,7 @@ foreach(arch ${HWASAN_SUPPORTED_ARCH}) DEFS ${ASAN_DYNAMIC_DEFINITIONS} PARENT_TARGET hwasan) - if(UNIX) + if(SANITIZER_USE_SYMBOLS) add_sanitizer_rt_symbols(clang_rt.hwasan ARCHS ${arch} EXTRA hwasan.syms.extra) diff --git a/lib/hwasan/hwasan.cc b/lib/hwasan/hwasan.cc index 8b1e5a7846af..7dab8249e3f7 100644 --- a/lib/hwasan/hwasan.cc +++ b/lib/hwasan/hwasan.cc @@ -1,4 +1,4 @@ -//===-- hwasan.cc -----------------------------------------------------------===// +//===-- hwasan.cc ---------------------------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -13,8 +13,10 @@ //===----------------------------------------------------------------------===// #include "hwasan.h" -#include "hwasan_thread.h" +#include "hwasan_mapping.h" #include "hwasan_poisoning.h" +#include "hwasan_report.h" +#include "hwasan_thread.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" @@ -84,7 +86,7 @@ static void InitializeFlags() { cf.check_printf = false; cf.intercept_tls_get_addr = true; cf.exitcode = 99; - cf.handle_sigill = kHandleSignalExclusive; + cf.handle_sigtrap = kHandleSignalExclusive; OverrideCommonFlags(cf); } @@ -143,12 +145,22 @@ void PrintWarning(uptr pc, uptr bp) { ReportInvalidAccess(&stack, 0); } +static void HWAsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2) { + Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, + line, cond, (uptr)v1, (uptr)v2); + PRINT_CURRENT_STACK_CHECK(); + Die(); +} + } // namespace __hwasan // Interface. using namespace __hwasan; +uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol. 
+ void __hwasan_init() { CHECK(!hwasan_init_is_running); if (hwasan_inited) return; @@ -160,23 +172,28 @@ void __hwasan_init() { CacheBinaryName(); InitializeFlags(); - __sanitizer_set_report_path(common_flags()->log_path); + // Install tool-specific callbacks in sanitizer_common. + SetCheckFailedCallback(HWAsanCheckFailed); - InitializeInterceptors(); - InstallDeadlySignalHandlers(HwasanOnDeadlySignal); - InstallAtExitHandler(); // Needs __cxa_atexit interceptor. + __sanitizer_set_report_path(common_flags()->log_path); DisableCoreDumperIfNecessary(); if (!InitShadow()) { - Printf("FATAL: HWAddressSanitizer can not mmap the shadow memory.\n"); - Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n"); - Printf("FATAL: Disabling ASLR is known to cause this error.\n"); - Printf("FATAL: If running under GDB, try " - "'set disable-randomization off'.\n"); + Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n"); + if (HWASAN_FIXED_MAPPING) { + Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n"); + Printf("FATAL: Disabling ASLR is known to cause this error.\n"); + Printf("FATAL: If running under GDB, try " + "'set disable-randomization off'.\n"); + } DumpProcessMap(); Die(); } + InitializeInterceptors(); + InstallDeadlySignalHandlers(HwasanOnDeadlySignal); + InstallAtExitHandler(); // Needs __cxa_atexit interceptor. + Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); @@ -240,11 +257,23 @@ void __sanitizer_unaligned_store64(uu64 *p, u64 x) { template<unsigned X> __attribute__((always_inline)) -static void SigIll() { +static void SigTrap(uptr p) { #if defined(__aarch64__) - asm("hlt %0\n\t" ::"n"(X)); -#elif defined(__x86_64__) || defined(__i386__) - asm("ud2\n\t"); + (void)p; + // 0x900 is added to do not interfere with the kernel use of lower values of + // brk immediate. 
+ // FIXME: Add a constraint to put the pointer into x0, the same as x86 branch. + asm("brk %0\n\t" ::"n"(0x900 + X)); +#elif defined(__x86_64__) + // INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes + // total. The pointer is passed via rdi. + // 0x40 is added as a safeguard, to help distinguish our trap from others and + // to avoid 0 offsets in the command (otherwise it'll be reduced to a + // different nop command, the three bytes one). + asm volatile( + "int3\n" + "nopl %c0(%%rax)\n" + :: "n"(0x40 + X), "D"(p)); #else // FIXME: not always sigill. __builtin_trap(); @@ -261,8 +290,8 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) { uptr ptr_raw = p & ~kAddressTagMask; tag_t mem_tag = *(tag_t *)MEM_TO_SHADOW(ptr_raw); if (UNLIKELY(ptr_tag != mem_tag)) { - SigIll<0x100 + 0x20 * (EA == ErrorAction::Recover) + - 0x10 * (AT == AccessType::Store) + LogSize>(); + SigTrap<0x20 * (EA == ErrorAction::Recover) + + 0x10 * (AT == AccessType::Store) + LogSize>(p); if (EA == ErrorAction::Abort) __builtin_unreachable(); } } @@ -277,13 +306,13 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p, tag_t *shadow_last = (tag_t *)MEM_TO_SHADOW(ptr_raw + sz - 1); for (tag_t *t = shadow_first; t <= shadow_last; ++t) if (UNLIKELY(ptr_tag != *t)) { - SigIll<0x100 + 0x20 * (EA == ErrorAction::Recover) + - 0x10 * (AT == AccessType::Store) + 0xf>(); + SigTrap<0x20 * (EA == ErrorAction::Recover) + + 0x10 * (AT == AccessType::Store) + 0xf>(p); if (EA == ErrorAction::Abort) __builtin_unreachable(); } } -void __hwasan_load(uptr p, uptr sz) { +void __hwasan_loadN(uptr p, uptr sz) { CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz); } void __hwasan_load1(uptr p) { @@ -302,7 +331,7 @@ void __hwasan_load16(uptr p) { CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p); } -void __hwasan_load_noabort(uptr p, uptr sz) { +void __hwasan_loadN_noabort(uptr p, uptr sz) { CheckAddressSized<ErrorAction::Recover, 
AccessType::Load>(p, sz); } void __hwasan_load1_noabort(uptr p) { @@ -321,7 +350,7 @@ void __hwasan_load16_noabort(uptr p) { CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p); } -void __hwasan_store(uptr p, uptr sz) { +void __hwasan_storeN(uptr p, uptr sz) { CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz); } void __hwasan_store1(uptr p) { @@ -340,7 +369,7 @@ void __hwasan_store16(uptr p) { CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p); } -void __hwasan_store_noabort(uptr p, uptr sz) { +void __hwasan_storeN_noabort(uptr p, uptr sz) { CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz); } void __hwasan_store1_noabort(uptr p) { @@ -359,6 +388,18 @@ void __hwasan_store16_noabort(uptr p) { CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p); } +void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) { + TagMemoryAligned(p, sz, tag); +} + +static const u8 kFallbackTag = 0xBB; + +u8 __hwasan_generate_tag() { + HwasanThread *t = GetCurrentThread(); + if (!t) return kFallbackTag; + return t->GenerateRandomTag(); +} + #if !SANITIZER_SUPPORTS_WEAK_HOOKS extern "C" { SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE diff --git a/lib/hwasan/hwasan.h b/lib/hwasan/hwasan.h index bcf5282dce7c..47d1d057a0dd 100644 --- a/lib/hwasan/hwasan.h +++ b/lib/hwasan/hwasan.h @@ -1,4 +1,4 @@ -//===-- hwasan.h --------------------------------------------------*- C++ -*-===// +//===-- hwasan.h ------------------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -32,16 +32,6 @@ typedef u8 tag_t; -// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th). 
-const uptr kShadowScale = 4; -const uptr kShadowAlignment = 1UL << kShadowScale; - -#define MEM_TO_SHADOW_OFFSET(mem) ((uptr)(mem) >> kShadowScale) -#define MEM_TO_SHADOW(mem) ((uptr)(mem) >> kShadowScale) -#define SHADOW_TO_MEM(shadow) ((uptr)(shadow) << kShadowScale) - -#define MEM_IS_APP(mem) true - // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address // translation and can be used to store a tag. const unsigned kAddressTagShift = 56; @@ -107,15 +97,6 @@ void PrintWarning(uptr pc, uptr bp); void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp, void *context, bool request_fast_unwind); -void ReportInvalidAccess(StackTrace *stack, u32 origin); -void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size, - bool is_store); -void ReportStats(); -void ReportAtExitStatistics(); -void DescribeMemoryRange(const void *x, uptr size); -void ReportInvalidAccessInsideAddressRange(const char *what, const void *start, uptr size, - uptr offset); - // Returns a "chained" origin id, pointing to the given stack trace followed by // the previous origin id. 
u32 ChainOrigin(u32 id, StackTrace *stack); @@ -135,6 +116,15 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1; GetStackTrace(&stack, kStackTraceMax, pc, bp, nullptr, \ common_flags()->fast_unwind_on_fatal) +#define GET_FATAL_STACK_TRACE_HERE \ + GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) + +#define PRINT_CURRENT_STACK_CHECK() \ + { \ + GET_FATAL_STACK_TRACE_HERE; \ + stack.Print(); \ + } + class ScopedThreadLocalStateBackup { public: ScopedThreadLocalStateBackup() { Backup(); } diff --git a/lib/hwasan/hwasan_allocator.cc b/lib/hwasan/hwasan_allocator.cc index fbcaf78b88f0..c2b9b0b69589 100644 --- a/lib/hwasan/hwasan_allocator.cc +++ b/lib/hwasan/hwasan_allocator.cc @@ -1,4 +1,4 @@ -//===-- hwasan_allocator.cc --------------------------- ---------------------===// +//===-- hwasan_allocator.cc ------------------------- ---------------------===// // // The LLVM Compiler Infrastructure // @@ -15,11 +15,13 @@ #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "hwasan.h" #include "hwasan_allocator.h" +#include "hwasan_mapping.h" #include "hwasan_thread.h" #include "hwasan_poisoning.h" @@ -70,8 +72,8 @@ struct HwasanMapUnmapCallback { } }; -#if !defined(__aarch64__) -#error unsupported platform +#if !defined(__aarch64__) && !defined(__x86_64__) +#error Unsupported platform #endif static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G @@ -100,6 +102,9 @@ static AllocatorCache fallback_allocator_cache; static SpinMutex fallback_mutex; static atomic_uint8_t hwasan_allocator_tagging_enabled; +static const tag_t kFallbackAllocTag = 0xBB; +static const tag_t kFallbackFreeTag = 0xBC; + void 
HwasanAllocatorInit() { atomic_store_relaxed(&hwasan_allocator_tagging_enabled, !flags()->disable_allocator_tagging); @@ -123,9 +128,12 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment, size = RoundUpTo(size, kShadowAlignment); if (size > kMaxAllowedMallocSize) { - Report("WARNING: HWAddressSanitizer failed to allocate %p bytes\n", - (void *)size); - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) { + Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n", + size); + return nullptr; + } + ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack); } HwasanThread *t = GetCurrentThread(); void *allocated; @@ -137,6 +145,12 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment, AllocatorCache *cache = &fallback_allocator_cache; allocated = allocator.Allocate(cache, size, alignment); } + if (UNLIKELY(!allocated)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, stack); + } Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated)); meta->state = CHUNK_ALLOCATED; @@ -145,10 +159,11 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment, if (zeroise) internal_memset(allocated, 0, size); - void *user_ptr = (flags()->tag_in_malloc && - atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) - ? (void *)TagMemoryAligned((uptr)allocated, size, 0xBB) - : allocated; + void *user_ptr = allocated; + if (flags()->tag_in_malloc && + atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) + user_ptr = (void *)TagMemoryAligned( + (uptr)user_ptr, size, t ? t->GenerateRandomTag() : kFallbackAllocTag); HWASAN_MALLOC_HOOK(user_ptr, size); return user_ptr; @@ -166,10 +181,11 @@ void HwasanDeallocate(StackTrace *stack, void *user_ptr) { meta->free_context_id = StackDepotPut(*stack); // This memory will not be reused by anyone else, so we are free to keep it // poisoned. 
+ HwasanThread *t = GetCurrentThread(); if (flags()->tag_in_free && atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) - TagMemoryAligned((uptr)p, size, 0xBC); - HwasanThread *t = GetCurrentThread(); + TagMemoryAligned((uptr)p, size, + t ? t->GenerateRandomTag() : kFallbackFreeTag); if (t) { AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); allocator.Deallocate(cache, p); @@ -195,8 +211,12 @@ void *HwasanReallocate(StackTrace *stack, void *user_old_p, uptr new_size, meta->requested_size = new_size; if (!atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) return user_old_p; - if (flags()->retag_in_realloc) - return (void *)TagMemoryAligned((uptr)old_p, new_size, 0xCC); + if (flags()->retag_in_realloc) { + HwasanThread *t = GetCurrentThread(); + return (void *)TagMemoryAligned( + (uptr)old_p, new_size, + t ? t->GenerateRandomTag() : kFallbackAllocTag); + } if (new_size > old_size) { tag_t tag = GetTagFromPointer((uptr)user_old_p); TagMemoryAligned((uptr)old_p + old_size, new_size - old_size, tag); @@ -212,6 +232,15 @@ void *HwasanReallocate(StackTrace *stack, void *user_old_p, uptr new_size, return new_p; } +void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, stack); + } + return HwasanAllocate(stack, nmemb * size, sizeof(u64), true); +} + HwasanChunkView FindHeapChunkByAddress(uptr address) { void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address)); if (!block) @@ -235,9 +264,7 @@ void *hwasan_malloc(uptr size, StackTrace *stack) { } void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) - return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest()); - return SetErrnoOnNull(HwasanAllocate(stack, nmemb * size, sizeof(u64), true)); + return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size)); } void 
*hwasan_realloc(void *ptr, uptr size, StackTrace *stack) { @@ -251,14 +278,17 @@ void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) { } void *hwasan_valloc(uptr size, StackTrace *stack) { - return SetErrnoOnNull(HwasanAllocate(stack, size, GetPageSizeCached(), false)); + return SetErrnoOnNull( + HwasanAllocate(stack, size, GetPageSizeCached(), false)); } void *hwasan_pvalloc(uptr size, StackTrace *stack) { uptr PageSize = GetPageSizeCached(); if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { errno = errno_ENOMEM; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportPvallocOverflow(size, stack); } // pvalloc(0) should allocate one page. size = size ? RoundUpTo(size, PageSize) : PageSize; @@ -268,7 +298,9 @@ void *hwasan_pvalloc(uptr size, StackTrace *stack) { void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) { if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { errno = errno_EINVAL; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, stack); } return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false)); } @@ -276,7 +308,9 @@ void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) { void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) { if (UNLIKELY(!IsPowerOfTwo(alignment))) { errno = errno_EINVAL; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, stack); } return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false)); } @@ -284,18 +318,20 @@ void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) { int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size, StackTrace *stack) { if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { - Allocator::FailureHandler::OnBadRequest(); - return 
errno_EINVAL; + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, stack); } void *ptr = HwasanAllocate(stack, size, alignment, false); if (UNLIKELY(!ptr)) + // OOM error is already taken care of by HwasanAllocate. return errno_ENOMEM; CHECK(IsAligned((uptr)ptr, alignment)); *memptr = ptr; return 0; } -} // namespace __hwasan +} // namespace __hwasan using namespace __hwasan; diff --git a/lib/hwasan/hwasan_dynamic_shadow.cc b/lib/hwasan/hwasan_dynamic_shadow.cc new file mode 100644 index 000000000000..17338003aa65 --- /dev/null +++ b/lib/hwasan/hwasan_dynamic_shadow.cc @@ -0,0 +1,132 @@ +//===-- hwasan_dynamic_shadow.cc --------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory +/// region and handles ifunc resolver case, when necessary. +/// +//===----------------------------------------------------------------------===// + +#include "hwasan_dynamic_shadow.h" +#include "hwasan_mapping.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_posix.h" + +// The code in this file needs to run in an unrelocated binary. It should not +// access any external symbol, including its own non-hidden globals. 
+ +namespace __hwasan { + +static void UnmapFromTo(uptr from, uptr to) { + if (to == from) + return; + CHECK(to >= from); + uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from); + if (UNLIKELY(internal_iserror(res))) { + Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n", + SanitizerToolName, to - from, to - from, from); + CHECK("unable to unmap" && 0); + } +} + +// Returns an address aligned to 8 pages, such that one page on the left and +// shadow_size_bytes bytes on the right of it are mapped r/o. +static uptr MapDynamicShadow(uptr shadow_size_bytes) { + const uptr granularity = GetMmapGranularity(); + const uptr alignment = granularity * SHADOW_GRANULARITY; + const uptr left_padding = granularity; + const uptr shadow_size = + RoundUpTo(shadow_size_bytes, granularity); + const uptr map_size = shadow_size + left_padding + alignment; + + const uptr map_start = (uptr)MmapNoAccess(map_size); + CHECK_NE(map_start, ~(uptr)0); + + const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); + + UnmapFromTo(map_start, shadow_start - left_padding); + UnmapFromTo(shadow_start + shadow_size, map_start + map_size); + + return shadow_start; +} + +} // namespace __hwasan + +#if HWASAN_PREMAP_SHADOW + +extern "C" { + +INTERFACE_ATTRIBUTE void __hwasan_shadow(); +decltype(__hwasan_shadow)* __hwasan_premap_shadow(); + +} // extern "C" + +namespace __hwasan { + +// Conservative upper limit. +static uptr PremapShadowSize() { + return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale, + GetMmapGranularity()); +} + +static uptr PremapShadow() { + return MapDynamicShadow(PremapShadowSize()); +} + +static bool IsPremapShadowAvailable() { + const uptr shadow = reinterpret_cast<uptr>(&__hwasan_shadow); + const uptr resolver = reinterpret_cast<uptr>(&__hwasan_premap_shadow); + // shadow == resolver is how Android KitKat and older handles ifunc. + // shadow == 0 just in case. 
+ return shadow != 0 && shadow != resolver; +} + +static uptr FindPremappedShadowStart(uptr shadow_size_bytes) { + const uptr granularity = GetMmapGranularity(); + const uptr shadow_start = reinterpret_cast<uptr>(&__hwasan_shadow); + const uptr premap_shadow_size = PremapShadowSize(); + const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity); + + // We may have mapped too much. Release extra memory. + UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size); + return shadow_start; +} + +} // namespace __hwasan + +extern "C" { + +decltype(__hwasan_shadow)* __hwasan_premap_shadow() { + // The resolver might be called multiple times. Map the shadow just once. + static __sanitizer::uptr shadow = 0; + if (!shadow) + shadow = __hwasan::PremapShadow(); + return reinterpret_cast<decltype(__hwasan_shadow)*>(shadow); +} + +// __hwasan_shadow is a "function" that has the same address as the first byte +// of the shadow mapping. +INTERFACE_ATTRIBUTE __attribute__((ifunc("__hwasan_premap_shadow"))) +void __hwasan_shadow(); + +} // extern "C" + +#endif // HWASAN_PREMAP_SHADOW + +namespace __hwasan { + +uptr FindDynamicShadowStart(uptr shadow_size_bytes) { +#if HWASAN_PREMAP_SHADOW + if (IsPremapShadowAvailable()) + return FindPremappedShadowStart(shadow_size_bytes); +#endif + return MapDynamicShadow(shadow_size_bytes); +} + +} // namespace __hwasan diff --git a/lib/hwasan/hwasan_dynamic_shadow.h b/lib/hwasan/hwasan_dynamic_shadow.h new file mode 100644 index 000000000000..b5e9e1dd6a06 --- /dev/null +++ b/lib/hwasan/hwasan_dynamic_shadow.h @@ -0,0 +1,27 @@ +//===-- hwasan_dynamic_shadow.h ---------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer. 
It reserves dynamic shadow memory +/// region. +/// +//===----------------------------------------------------------------------===// + +#ifndef HWASAN_PREMAP_SHADOW_H +#define HWASAN_PREMAP_SHADOW_H + +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __hwasan { + +uptr FindDynamicShadowStart(uptr shadow_size_bytes); + +} // namespace __hwasan + +#endif // HWASAN_PREMAP_SHADOW_H diff --git a/lib/hwasan/hwasan_flags.inc b/lib/hwasan/hwasan_flags.inc index a2cb701ae93d..c45781168d6c 100644 --- a/lib/hwasan/hwasan_flags.inc +++ b/lib/hwasan/hwasan_flags.inc @@ -27,3 +27,7 @@ HWASAN_FLAG(bool, atexit, false, "") // Test only flag to disable malloc/realloc/free memory tagging on startup. // Tagging can be reenabled with __hwasan_enable_allocator_tagging(). HWASAN_FLAG(bool, disable_allocator_tagging, false, "") + +// If false, use simple increment of a thread local counter to generate new +// tags. +HWASAN_FLAG(bool, random_tags, true, "") diff --git a/lib/hwasan/hwasan_interceptors.cc b/lib/hwasan/hwasan_interceptors.cc index fb39e9fdacf9..66aab95db56f 100644 --- a/lib/hwasan/hwasan_interceptors.cc +++ b/lib/hwasan/hwasan_interceptors.cc @@ -17,8 +17,10 @@ #include "interception/interception.h" #include "hwasan.h" +#include "hwasan_mapping.h" #include "hwasan_thread.h" #include "hwasan_poisoning.h" +#include "hwasan_report.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_interface.h" @@ -258,34 +260,17 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) { INTERCEPTOR(void *, malloc, SIZE_T size) { GET_MALLOC_STACK_TRACE; + if (UNLIKELY(!hwasan_init_is_running)) + ENSURE_HWASAN_INITED(); if (UNLIKELY(!hwasan_inited)) // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. 
return AllocateFromLocalPool(size); return hwasan_malloc(size, &stack); } - -INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags, - int fd, OFF_T offset) { - if (hwasan_init_is_running) - return REAL(mmap)(addr, length, prot, flags, fd, offset); - ENSURE_HWASAN_INITED(); - if (addr && !MEM_IS_APP(addr)) { - if (flags & map_fixed) { - errno = errno_EINVAL; - return (void *)-1; - } else { - addr = nullptr; - } - } - void *res = REAL(mmap)(addr, length, prot, flags, fd, offset); - return res; -} - -#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD -INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags, - int fd, OFF64_T offset) { - ENSURE_HWASAN_INITED(); +template <class Mmap> +static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T sz, int prot, + int flags, int fd, OFF64_T off) { if (addr && !MEM_IS_APP(addr)) { if (flags & map_fixed) { errno = errno_EINVAL; @@ -294,13 +279,8 @@ INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags, addr = nullptr; } } - void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset); - return res; + return real_mmap(addr, sz, prot, flags, fd, off); } -#define HWASAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64) -#else -#define HWASAN_MAYBE_INTERCEPT_MMAP64 -#endif extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); @@ -427,6 +407,22 @@ int OnExit() { *begin = *end = 0; \ } +#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \ + { \ + COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \ + if (common_flags()->intercept_intrin && \ + MEM_IS_APP(GetAddressFromPointer(dst))) \ + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \ + return REAL(memset)(dst, v, size); \ + } + +#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, fd, \ + offset) \ + do { \ + return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd, \ + offset); \ + } while (false) + #include 
"sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_common_interceptors.inc" #include "sanitizer_common/sanitizer_signal_interceptors.inc" @@ -448,6 +444,7 @@ int OnExit() { (void)(s); \ } while (false) #include "sanitizer_common/sanitizer_common_syscalls.inc" +#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" @@ -459,8 +456,6 @@ void InitializeInterceptors() { InitializeCommonInterceptors(); InitializeSignalInterceptors(); - INTERCEPT_FUNCTION(mmap); - HWASAN_MAYBE_INTERCEPT_MMAP64; INTERCEPT_FUNCTION(posix_memalign); HWASAN_MAYBE_INTERCEPT_MEMALIGN; INTERCEPT_FUNCTION(__libc_memalign); diff --git a/lib/hwasan/hwasan_interface_internal.h b/lib/hwasan/hwasan_interface_internal.h index 7e95271ac0af..b4e5c80904df 100644 --- a/lib/hwasan/hwasan_interface_internal.h +++ b/lib/hwasan/hwasan_interface_internal.h @@ -18,6 +18,7 @@ #include "sanitizer_common/sanitizer_internal_defs.h" extern "C" { + SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_init(); @@ -32,7 +33,10 @@ using __sanitizer::u16; using __sanitizer::u8; SANITIZER_INTERFACE_ATTRIBUTE -void __hwasan_load(uptr, uptr); +extern uptr __hwasan_shadow_memory_dynamic_address; + +SANITIZER_INTERFACE_ATTRIBUTE +void __hwasan_loadN(uptr, uptr); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_load1(uptr); SANITIZER_INTERFACE_ATTRIBUTE @@ -45,7 +49,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_load16(uptr); SANITIZER_INTERFACE_ATTRIBUTE -void __hwasan_load_noabort(uptr, uptr); +void __hwasan_loadN_noabort(uptr, uptr); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_load1_noabort(uptr); SANITIZER_INTERFACE_ATTRIBUTE @@ -58,7 +62,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_load16_noabort(uptr); SANITIZER_INTERFACE_ATTRIBUTE -void __hwasan_store(uptr, uptr); +void __hwasan_storeN(uptr, uptr); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_store1(uptr); SANITIZER_INTERFACE_ATTRIBUTE @@ -71,7 +75,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_store16(uptr); 
SANITIZER_INTERFACE_ATTRIBUTE -void __hwasan_store_noabort(uptr, uptr); +void __hwasan_storeN_noabort(uptr, uptr); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_store1_noabort(uptr); SANITIZER_INTERFACE_ATTRIBUTE @@ -83,6 +87,12 @@ void __hwasan_store8_noabort(uptr); SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_store16_noabort(uptr); +SANITIZER_INTERFACE_ATTRIBUTE +void __hwasan_tag_memory(uptr p, u8 tag, uptr sz); + +SANITIZER_INTERFACE_ATTRIBUTE +u8 __hwasan_generate_tag(); + // Returns the offset of the first tag mismatch or -1 if the whole range is // good. SANITIZER_INTERFACE_ATTRIBUTE diff --git a/lib/hwasan/hwasan_linux.cc b/lib/hwasan/hwasan_linux.cc index 48dea8eb6ff4..5ab98dca594d 100644 --- a/lib/hwasan/hwasan_linux.cc +++ b/lib/hwasan/hwasan_linux.cc @@ -1,4 +1,4 @@ -//===-- hwasan_linux.cc -----------------------------------------------------===// +//===-- hwasan_linux.cc -----------------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -6,41 +6,45 @@ // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// -// -// This file is a part of HWAddressSanitizer. -// -// Linux-, NetBSD- and FreeBSD-specific code. +/// +/// \file +/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and +/// FreeBSD-specific code. 
+/// //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD #include "hwasan.h" +#include "hwasan_dynamic_shadow.h" +#include "hwasan_interface_internal.h" +#include "hwasan_mapping.h" +#include "hwasan_report.h" #include "hwasan_thread.h" #include <elf.h> #include <link.h> #include <pthread.h> +#include <signal.h> #include <stdio.h> #include <stdlib.h> -#include <signal.h> +#include <sys/resource.h> +#include <sys/time.h> #include <unistd.h> #include <unwind.h> -#include <sys/time.h> -#include <sys/resource.h> #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_procmaps.h" namespace __hwasan { -void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { +static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { CHECK_EQ((beg % GetMmapGranularity()), 0); CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); uptr size = end - beg + 1; DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. - void *res = MmapFixedNoReserve(beg, size, name); - if (res != (void *)beg) { + if (!MmapFixedNoReserve(beg, size, name)) { Report( "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " "Perhaps you're using ulimit -v\n", @@ -52,8 +56,11 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { } static void ProtectGap(uptr addr, uptr size) { + if (!size) + return; void *res = MmapFixedNoAccess(addr, size, "shadow gap"); - if (addr == (uptr)res) return; + if (addr == (uptr)res) + return; // A few pages at the start of the address space can not be protected. // But we really want to protect as much as possible, to prevent this memory // being returned as a result of a non-FIXED mmap(). 
@@ -63,63 +70,160 @@ static void ProtectGap(uptr addr, uptr size) { addr += step; size -= step; void *res = MmapFixedNoAccess(addr, size, "shadow gap"); - if (addr == (uptr)res) return; + if (addr == (uptr)res) + return; } } Report( - "ERROR: Failed to protect the shadow gap. " - "ASan cannot proceed correctly. ABORTING.\n"); + "ERROR: Failed to protect shadow gap [%p, %p]. " + "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr, + (void *)(addr + size)); DumpProcessMap(); Die(); } -bool InitShadow() { - const uptr maxVirtualAddress = GetMaxUserVirtualAddress(); +static uptr kLowMemStart; +static uptr kLowMemEnd; +static uptr kLowShadowEnd; +static uptr kLowShadowStart; +static uptr kHighShadowStart; +static uptr kHighShadowEnd; +static uptr kHighMemStart; +static uptr kHighMemEnd; + +static void PrintRange(uptr start, uptr end, const char *name) { + Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name); +} - // LowMem covers as much of the first 4GB as possible. 
- const uptr kLowMemEnd = 1UL<<32; - const uptr kLowShadowEnd = kLowMemEnd >> kShadowScale; - const uptr kLowShadowStart = kLowShadowEnd >> kShadowScale; +static void PrintAddressSpaceLayout() { + PrintRange(kHighMemStart, kHighMemEnd, "HighMem"); + if (kHighShadowEnd + 1 < kHighMemStart) + PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap"); + else + CHECK_EQ(kHighShadowEnd + 1, kHighMemStart); + PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow"); + if (SHADOW_OFFSET) { + if (kLowShadowEnd + 1 < kHighShadowStart) + PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap"); + else + CHECK_EQ(kLowMemEnd + 1, kHighShadowStart); + PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow"); + if (kLowMemEnd + 1 < kLowShadowStart) + PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap"); + else + CHECK_EQ(kLowMemEnd + 1, kLowShadowStart); + PrintRange(kLowMemStart, kLowMemEnd, "LowMem"); + CHECK_EQ(0, kLowMemStart); + } else { + if (kLowMemEnd + 1 < kHighShadowStart) + PrintRange(kLowMemEnd + 1, kHighShadowStart - 1, "ShadowGap"); + else + CHECK_EQ(kLowMemEnd + 1, kHighShadowStart); + PrintRange(kLowMemStart, kLowMemEnd, "LowMem"); + CHECK_EQ(kLowShadowEnd + 1, kLowMemStart); + PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow"); + PrintRange(0, kLowShadowStart - 1, "ShadowGap"); + } +} +static uptr GetHighMemEnd() { // HighMem covers the upper part of the address space. 
- const uptr kHighShadowEnd = (maxVirtualAddress >> kShadowScale) + 1; - const uptr kHighShadowStart = Max(kLowMemEnd, kHighShadowEnd >> kShadowScale); - CHECK(kHighShadowStart < kHighShadowEnd); - - const uptr kHighMemStart = kHighShadowStart << kShadowScale; - CHECK(kHighShadowEnd <= kHighMemStart); - - if (Verbosity()) { - Printf("|| `[%p, %p]` || HighMem ||\n", (void *)kHighMemStart, - (void *)maxVirtualAddress); - if (kHighMemStart > kHighShadowEnd) - Printf("|| `[%p, %p]` || ShadowGap2 ||\n", (void *)kHighShadowEnd, - (void *)kHighMemStart); - Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowStart, - (void *)kHighShadowEnd); - if (kHighShadowStart > kLowMemEnd) - Printf("|| `[%p, %p]` || ShadowGap2 ||\n", (void *)kHighShadowEnd, - (void *)kHighMemStart); - Printf("|| `[%p, %p]` || LowMem ||\n", (void *)kLowShadowEnd, - (void *)kLowMemEnd); - Printf("|| `[%p, %p]` || LowShadow ||\n", (void *)kLowShadowStart, - (void *)kLowShadowEnd); - Printf("|| `[%p, %p]` || ShadowGap1 ||\n", (void *)0, - (void *)kLowShadowStart); + uptr max_address = GetMaxUserVirtualAddress(); + if (SHADOW_OFFSET) + // Adjust max address to make sure that kHighMemEnd and kHighMemStart are + // properly aligned: + max_address |= SHADOW_GRANULARITY * GetMmapGranularity() - 1; + return max_address; +} + +static void InitializeShadowBaseAddress(uptr shadow_size_bytes) { + // Set the shadow memory address to uninitialized. + __hwasan_shadow_memory_dynamic_address = kDefaultShadowSentinel; + uptr shadow_start = SHADOW_OFFSET; + // Detect if a dynamic shadow address must be used and find the available + // location when necessary. When dynamic address is used, the macro + // kLowShadowBeg expands to __hwasan_shadow_memory_dynamic_address which + // was just set to kDefaultShadowSentinel. 
+ if (shadow_start == kDefaultShadowSentinel) { + __hwasan_shadow_memory_dynamic_address = 0; + CHECK_EQ(0, SHADOW_OFFSET); + shadow_start = FindDynamicShadowStart(shadow_size_bytes); } + // Update the shadow memory address (potentially) used by instrumentation. + __hwasan_shadow_memory_dynamic_address = shadow_start; +} - ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd - 1, "low shadow"); - ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd - 1, "high shadow"); - ProtectGap(0, kLowShadowStart); - if (kHighShadowStart > kLowMemEnd) - ProtectGap(kLowMemEnd, kHighShadowStart - kLowMemEnd); - if (kHighMemStart > kHighShadowEnd) - ProtectGap(kHighShadowEnd, kHighMemStart - kHighShadowEnd); +bool InitShadow() { + // Define the entire memory range. + kHighMemEnd = GetHighMemEnd(); + + // Determine shadow memory base offset. + InitializeShadowBaseAddress(MEM_TO_SHADOW_SIZE(kHighMemEnd)); + + // Place the low memory first. + if (SHADOW_OFFSET) { + kLowMemEnd = SHADOW_OFFSET - 1; + kLowMemStart = 0; + } else { + // LowMem covers as much of the first 4GB as possible. + kLowMemEnd = (1UL << 32) - 1; + kLowMemStart = MEM_TO_SHADOW(kLowMemEnd) + 1; + } + + // Define the low shadow based on the already placed low memory. + kLowShadowEnd = MEM_TO_SHADOW(kLowMemEnd); + kLowShadowStart = SHADOW_OFFSET ? SHADOW_OFFSET : MEM_TO_SHADOW(kLowMemStart); + + // High shadow takes whatever memory is left up there (making sure it is not + // interfering with low memory in the fixed case). + kHighShadowEnd = MEM_TO_SHADOW(kHighMemEnd); + kHighShadowStart = Max(kLowMemEnd, MEM_TO_SHADOW(kHighShadowEnd)) + 1; + + // High memory starts where allocated shadow allows. + kHighMemStart = SHADOW_TO_MEM(kHighShadowStart); + + // Check the sanity of the defined memory ranges (there might be gaps). 
+ CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0); + CHECK_GT(kHighMemStart, kHighShadowEnd); + CHECK_GT(kHighShadowEnd, kHighShadowStart); + CHECK_GT(kHighShadowStart, kLowMemEnd); + CHECK_GT(kLowMemEnd, kLowMemStart); + CHECK_GT(kLowShadowEnd, kLowShadowStart); + if (SHADOW_OFFSET) + CHECK_GT(kLowShadowStart, kLowMemEnd); + else + CHECK_GT(kLowMemEnd, kLowShadowStart); + + if (Verbosity()) + PrintAddressSpaceLayout(); + + // Reserve shadow memory. + ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow"); + ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow"); + + // Protect all the gaps. + ProtectGap(0, Min(kLowMemStart, kLowShadowStart)); + if (SHADOW_OFFSET) { + if (kLowMemEnd + 1 < kLowShadowStart) + ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1); + if (kLowShadowEnd + 1 < kHighShadowStart) + ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1); + } else { + if (kLowMemEnd + 1 < kHighShadowStart) + ProtectGap(kLowMemEnd + 1, kHighShadowStart - kLowMemEnd - 1); + } + if (kHighShadowEnd + 1 < kHighMemStart) + ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1); return true; } +bool MemIsApp(uptr p) { + CHECK(GetTagFromPointer(p) == 0); + return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd); +} + static void HwasanAtExit(void) { if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0)) ReportStats(); @@ -177,50 +281,65 @@ struct AccessInfo { bool recover; }; -#if defined(__aarch64__) static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) { - // Access type is encoded in HLT immediate as 0x1XY, - // where X&1 is 1 for store, 0 for load, - // and X&2 is 1 if the error is recoverable. - // Valid values of Y are 0 to 4, which are interpreted as log2(access_size), - // and 0xF, which means that access size is stored in X1 register. - // Access address is always in X0 register. 
- AccessInfo ai; + // Access type is passed in a platform dependent way (see below) and encoded + // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is + // recoverable. Valid values of Y are 0 to 4, which are interpreted as + // log2(access_size), and 0xF, which means that access size is passed via + // platform dependent register (see below). +#if defined(__aarch64__) + // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF, + // access size is stored in X1 register. Access address is always in X0 + // register. uptr pc = (uptr)info->si_addr; - unsigned code = ((*(u32 *)pc) >> 5) & 0xffff; - if ((code & 0xff00) != 0x100) - return AccessInfo{0, 0, false, false}; // Not ours. - bool is_store = code & 0x10; - bool recover = code & 0x20; - unsigned size_log = code & 0xf; + const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff; + if ((code & 0xff00) != 0x900) + return AccessInfo{}; // Not ours. + + const bool is_store = code & 0x10; + const bool recover = code & 0x20; + const uptr addr = uc->uc_mcontext.regs[0]; + const unsigned size_log = code & 0xf; + if (size_log > 4 && size_log != 0xf) + return AccessInfo{}; // Not ours. + const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log; + +#elif defined(__x86_64__) + // Access type is encoded in the instruction following INT3 as + // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in + // RSI register. Access address is always in RDI register. + uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP]; + uint8_t *nop = (uint8_t*)pc; + if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 || + *(nop + 3) < 0x40) + return AccessInfo{}; // Not ours. + const unsigned code = *(nop + 3); + + const bool is_store = code & 0x10; + const bool recover = code & 0x20; + const uptr addr = uc->uc_mcontext.gregs[REG_RDI]; + const unsigned size_log = code & 0xf; if (size_log > 4 && size_log != 0xf) - return AccessInfo{0, 0, false, false}; // Not ours. 
+ return AccessInfo{}; // Not ours. + const uptr size = + size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log; - ai.is_store = is_store; - ai.is_load = !is_store; - ai.addr = uc->uc_mcontext.regs[0]; - if (size_log == 0xf) - ai.size = uc->uc_mcontext.regs[1]; - else - ai.size = 1U << size_log; - ai.recover = recover; - return ai; -} #else -static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) { - return AccessInfo{0, 0, false, false}; -} +# error Unsupported architecture #endif -static bool HwasanOnSIGILL(int signo, siginfo_t *info, ucontext_t *uc) { - SignalContext sig{info, uc}; + return AccessInfo{addr, size, is_store, !is_store, recover}; +} + +static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) { AccessInfo ai = GetAccessInfo(info, uc); if (!ai.is_store && !ai.is_load) return false; - InternalScopedBuffer<BufferedStackTrace> stack_buffer(1); + InternalMmapVector<BufferedStackTrace> stack_buffer(1); BufferedStackTrace *stack = stack_buffer.data(); stack->Reset(); + SignalContext sig{info, uc}; GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, uc, common_flags()->fast_unwind_on_fatal); @@ -230,7 +349,12 @@ static bool HwasanOnSIGILL(int signo, siginfo_t *info, ucontext_t *uc) { if (flags()->halt_on_error || !ai.recover) Die(); +#if defined(__aarch64__) uc->uc_mcontext.pc += 4; +#elif defined(__x86_64__) +#else +# error Unsupported architecture +#endif return true; } @@ -242,8 +366,8 @@ static void OnStackUnwind(const SignalContext &sig, const void *, void HwasanOnDeadlySignal(int signo, void *info, void *context) { // Probably a tag mismatch. 
- if (signo == SIGILL) - if (HwasanOnSIGILL(signo, (siginfo_t *)info, (ucontext_t*)context)) + if (signo == SIGTRAP) + if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context)) return; HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr); diff --git a/lib/hwasan/hwasan_mapping.h b/lib/hwasan/hwasan_mapping.h new file mode 100644 index 000000000000..650a5aefcb2d --- /dev/null +++ b/lib/hwasan/hwasan_mapping.h @@ -0,0 +1,85 @@ +//===-- hwasan_mapping.h ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer and defines memory mapping. +/// +//===----------------------------------------------------------------------===// + +#ifndef HWASAN_MAPPING_H +#define HWASAN_MAPPING_H + +#include "sanitizer_common/sanitizer_internal_defs.h" + +// Typical mapping on Linux/x86_64 with fixed shadow mapping: +// || [0x080000000000, 0x7fffffffffff] || HighMem || +// || [0x008000000000, 0x07ffffffffff] || HighShadow || +// || [0x000100000000, 0x007fffffffff] || ShadowGap || +// || [0x000010000000, 0x0000ffffffff] || LowMem || +// || [0x000001000000, 0x00000fffffff] || LowShadow || +// || [0x000000000000, 0x000000ffffff] || ShadowGap || +// +// and with dynamic shadow mapped at [0x770d59f40000, 0x7f0d59f40000]: +// || [0x7f0d59f40000, 0x7fffffffffff] || HighMem || +// || [0x7efe2f934000, 0x7f0d59f3ffff] || HighShadow || +// || [0x7e7e2f934000, 0x7efe2f933fff] || ShadowGap || +// || [0x770d59f40000, 0x7e7e2f933fff] || LowShadow || +// || [0x000000000000, 0x770d59f3ffff] || LowMem || + +// Typical mapping on Android/AArch64 (39-bit VMA): +// || [0x001000000000, 0x007fffffffff] || HighMem || +// || [0x000800000000, 0x000fffffffff] || ShadowGap || +// || 
[0x000100000000, 0x0007ffffffff] || HighShadow || +// || [0x000010000000, 0x0000ffffffff] || LowMem || +// || [0x000001000000, 0x00000fffffff] || LowShadow || +// || [0x000000000000, 0x000000ffffff] || ShadowGap || +// +// and with dynamic shadow mapped: [0x007477480000, 0x007c77480000]: +// || [0x007c77480000, 0x007fffffffff] || HighMem || +// || [0x007c3ebc8000, 0x007c7747ffff] || HighShadow || +// || [0x007bbebc8000, 0x007c3ebc7fff] || ShadowGap || +// || [0x007477480000, 0x007bbebc7fff] || LowShadow || +// || [0x000000000000, 0x00747747ffff] || LowMem || + +static constexpr __sanitizer::u64 kDefaultShadowSentinel = ~(__sanitizer::u64)0; + +// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th). +constexpr __sanitizer::uptr kShadowScale = 4; +constexpr __sanitizer::uptr kShadowAlignment = 1ULL << kShadowScale; + +#if SANITIZER_ANDROID +# define HWASAN_FIXED_MAPPING 0 +#else +# define HWASAN_FIXED_MAPPING 1 +#endif + +#if HWASAN_FIXED_MAPPING +# define SHADOW_OFFSET (0) +# define HWASAN_PREMAP_SHADOW 0 +#else +# define SHADOW_OFFSET (__hwasan_shadow_memory_dynamic_address) +# define HWASAN_PREMAP_SHADOW 1 +#endif + +#define SHADOW_GRANULARITY (1ULL << kShadowScale) + +#define MEM_TO_SHADOW(mem) (((uptr)(mem) >> kShadowScale) + SHADOW_OFFSET) +#define SHADOW_TO_MEM(shadow) (((uptr)(shadow) - SHADOW_OFFSET) << kShadowScale) + +#define MEM_TO_SHADOW_SIZE(size) ((uptr)(size) >> kShadowScale) + +#define MEM_IS_APP(mem) MemIsApp((uptr)(mem)) + +namespace __hwasan { + +bool MemIsApp(uptr p); + +} // namespace __hwasan + +#endif // HWASAN_MAPPING_H diff --git a/lib/hwasan/hwasan_new_delete.cc b/lib/hwasan/hwasan_new_delete.cc index 3ccc26734bf6..63ca74edd481 100644 --- a/lib/hwasan/hwasan_new_delete.cc +++ b/lib/hwasan/hwasan_new_delete.cc @@ -1,4 +1,4 @@ -//===-- hwasan_new_delete.cc ------------------------------------------------===// +//===-- hwasan_new_delete.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // 
@@ -15,6 +15,7 @@ #include "hwasan.h" #include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE @@ -32,7 +33,7 @@ namespace std { #define OPERATOR_NEW_BODY(nothrow) \ GET_MALLOC_STACK_TRACE; \ void *res = hwasan_malloc(size, &stack);\ - if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ return res INTERCEPTOR_ATTRIBUTE diff --git a/lib/hwasan/hwasan_poisoning.cc b/lib/hwasan/hwasan_poisoning.cc index 411fd05b10d5..b99d8ed0be79 100644 --- a/lib/hwasan/hwasan_poisoning.cc +++ b/lib/hwasan/hwasan_poisoning.cc @@ -13,6 +13,7 @@ #include "hwasan_poisoning.h" +#include "hwasan_mapping.h" #include "interception/interception.h" #include "sanitizer_common/sanitizer_common.h" @@ -22,7 +23,7 @@ uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) { CHECK(IsAligned(p, kShadowAlignment)); CHECK(IsAligned(size, kShadowAlignment)); uptr shadow_start = MEM_TO_SHADOW(p); - uptr shadow_size = MEM_TO_SHADOW_OFFSET(size); + uptr shadow_size = MEM_TO_SHADOW_SIZE(size); internal_memset((void *)shadow_start, tag, shadow_size); return AddTagToPointer(p, tag); } diff --git a/lib/hwasan/hwasan_report.cc b/lib/hwasan/hwasan_report.cc index a3c6709491d3..16e9016ea35b 100644 --- a/lib/hwasan/hwasan_report.cc +++ b/lib/hwasan/hwasan_report.cc @@ -1,4 +1,4 @@ -//===-- hwasan_report.cc ----------------------------------------------------===// +//===-- hwasan_report.cc --------------------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -14,6 +14,7 @@ #include "hwasan.h" #include "hwasan_allocator.h" +#include "hwasan_mapping.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" @@ -36,9 +37,9 @@ static StackTrace GetStackTraceFromId(u32 id) { class Decorator: 
public __sanitizer::SanitizerCommonDecorator { public: Decorator() : SanitizerCommonDecorator() { } - const char *Allocation() { return Magenta(); } - const char *Origin() { return Magenta(); } - const char *Name() { return Green(); } + const char *Allocation() const { return Magenta(); } + const char *Origin() const { return Magenta(); } + const char *Name() const { return Green(); } }; struct HeapAddressDescription { @@ -129,5 +130,4 @@ void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size, ReportErrorSummary("tag-mismatch", stack); } - } // namespace __hwasan diff --git a/lib/hwasan/hwasan_report.h b/lib/hwasan/hwasan_report.h new file mode 100644 index 000000000000..bb33f1a87308 --- /dev/null +++ b/lib/hwasan/hwasan_report.h @@ -0,0 +1,36 @@ +//===-- hwasan_report.h -----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer. HWASan-private header for error +/// reporting functions. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef HWASAN_REPORT_H +#define HWASAN_REPORT_H + +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __hwasan { + +void ReportInvalidAccess(StackTrace *stack, u32 origin); +void ReportStats(); +void ReportInvalidAccessInsideAddressRange(const char *what, const void *start, + uptr size, uptr offset); +void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size, + bool is_store); + +void ReportAtExitStatistics(); + + +} // namespace __hwasan + +#endif // HWASAN_REPORT_H diff --git a/lib/hwasan/hwasan_thread.cc b/lib/hwasan/hwasan_thread.cc index d6551ffc6fdc..b50c0fc76be4 100644 --- a/lib/hwasan/hwasan_thread.cc +++ b/lib/hwasan/hwasan_thread.cc @@ -1,5 +1,6 @@ #include "hwasan.h" +#include "hwasan_mapping.h" #include "hwasan_thread.h" #include "hwasan_poisoning.h" #include "hwasan_interface_internal.h" @@ -8,6 +9,19 @@ namespace __hwasan { +static u32 RandomSeed() { + u32 seed; + do { + if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed), + /*blocking=*/false))) { + seed = static_cast<u32>( + (NanoTime() >> 12) ^ + (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4)); + } + } while (!seed); + return seed; +} + HwasanThread *HwasanThread::Create(thread_callback_t start_routine, void *arg) { uptr PageSize = GetPageSizeCached(); @@ -16,6 +30,7 @@ HwasanThread *HwasanThread::Create(thread_callback_t start_routine, thread->start_routine_ = start_routine; thread->arg_ = arg; thread->destructor_iterations_ = GetPthreadDestructorIterations(); + thread->random_state_ = flags()->random_tags ? RandomSeed() : 0; return thread; } @@ -72,4 +87,28 @@ thread_return_t HwasanThread::ThreadStart() { return res; } +static u32 xorshift(u32 state) { + state ^= state << 13; + state ^= state >> 17; + state ^= state << 5; + return state; +} + +// Generate a (pseudo-)random non-zero tag. 
+tag_t HwasanThread::GenerateRandomTag() { + tag_t tag; + do { + if (flags()->random_tags) { + if (!random_buffer_) + random_buffer_ = random_state_ = xorshift(random_state_); + CHECK(random_buffer_); + tag = random_buffer_ & 0xFF; + random_buffer_ >>= 8; + } else { + tag = random_state_ = (random_state_ + 1) & 0xFF; + } + } while (!tag); + return tag; +} + } // namespace __hwasan diff --git a/lib/hwasan/hwasan_thread.h b/lib/hwasan/hwasan_thread.h index 96f1bb813adf..1e482adeac84 100644 --- a/lib/hwasan/hwasan_thread.h +++ b/lib/hwasan/hwasan_thread.h @@ -52,6 +52,8 @@ class HwasanThread { HwasanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; } + tag_t GenerateRandomTag(); + int destructor_iterations_; private: @@ -70,6 +72,9 @@ class HwasanThread { unsigned in_symbolizer_; unsigned in_interceptor_scope_; + u32 random_state_; + u32 random_buffer_; + HwasanThreadLocalMallocStorage malloc_storage_; }; diff --git a/lib/interception/.clang-format b/lib/interception/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/interception/.clang-format +++ b/lib/interception/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/interception/CMakeLists.txt b/lib/interception/CMakeLists.txt index 18d25948105d..c0ac974d726a 100644 --- a/lib/interception/CMakeLists.txt +++ b/lib/interception/CMakeLists.txt @@ -4,8 +4,13 @@ set(INTERCEPTION_SOURCES interception_linux.cc interception_mac.cc interception_win.cc - interception_type_test.cc - ) + interception_type_test.cc) + +set(INTERCEPTION_HEADERS + interception.h + interception_linux.h + interception_mac.h + interception_win.h) include_directories(..) 
@@ -16,6 +21,7 @@ add_compiler_rt_object_libraries(RTInterception OS ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${INTERCEPTION_SOURCES} + ADDITIONAL_HEADERS ${INTERCEPTION_HEADERS} CFLAGS ${INTERCEPTION_CFLAGS}) if(COMPILER_RT_INCLUDE_TESTS) diff --git a/lib/interception/interception.h b/lib/interception/interception.h index cba484936eac..ddd6ec20979b 100644 --- a/lib/interception/interception.h +++ b/lib/interception/interception.h @@ -18,8 +18,8 @@ #include "sanitizer_common/sanitizer_internal_defs.h" #if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \ - !SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \ - !SANITIZER_SOLARIS + !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_WINDOWS && \ + !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS # error "Interception doesn't work on this operating system." #endif @@ -130,6 +130,11 @@ const interpose_substitution substitution_##func_name[] \ extern "C" ret_type func(__VA_ARGS__); # define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \ extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__); +#elif SANITIZER_RTEMS +# define WRAP(x) x +# define WRAPPER_NAME(x) #x +# define INTERCEPTOR_ATTRIBUTE +# define DECLARE_WRAPPER(ret_type, func, ...) #elif SANITIZER_FREEBSD || SANITIZER_NETBSD # define WRAP(x) __interceptor_ ## x # define WRAPPER_NAME(x) "__interceptor_" #x @@ -157,6 +162,10 @@ const interpose_substitution substitution_##func_name[] \ # define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default"))) # define REAL(x) __unsanitized_##x # define DECLARE_REAL(ret_type, func, ...) +#elif SANITIZER_RTEMS +# define REAL(x) __real_ ## x +# define DECLARE_REAL(ret_type, func, ...) 
\ + extern "C" ret_type REAL(func)(__VA_ARGS__); #elif !SANITIZER_MAC # define PTR_TO_REAL(x) real_##x # define REAL(x) __interception::PTR_TO_REAL(x) @@ -175,7 +184,7 @@ const interpose_substitution substitution_##func_name[] \ # define ASSIGN_REAL(x, y) #endif // SANITIZER_MAC -#if !SANITIZER_FUCHSIA +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS #define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \ DECLARE_REAL(ret_type, func, __VA_ARGS__) \ extern "C" ret_type WRAP(func)(__VA_ARGS__); @@ -187,7 +196,7 @@ const interpose_substitution substitution_##func_name[] \ // macros does its job. In exceptional cases you may need to call REAL(foo) // without defining INTERCEPTOR(..., foo, ...). For example, if you override // foo with an interceptor for other function. -#if !SANITIZER_MAC && !SANITIZER_FUCHSIA +#if !SANITIZER_MAC && !SANITIZER_FUCHSIA && !SANITIZER_RTEMS # define DEFINE_REAL(ret_type, func, ...) \ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \ namespace __interception { \ @@ -266,7 +275,7 @@ typedef unsigned long uptr; // NOLINT #define INCLUDED_FROM_INTERCEPTION_LIB #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_OPENBSD || SANITIZER_SOLARIS # include "interception_linux.h" # define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) diff --git a/lib/interception/interception_linux.cc b/lib/interception/interception_linux.cc index c991550a4f72..26bfcd8f6794 100644 --- a/lib/interception/interception_linux.cc +++ b/lib/interception/interception_linux.cc @@ -15,7 +15,7 @@ #include "interception.h" #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_OPENBSD || SANITIZER_SOLARIS #include <dlfcn.h> // for dlsym() and dlvsym() @@ -43,7 +43,7 @@ bool GetRealFunctionAddress(const char *func_name, uptr *func_addr, } // Android and Solaris do not have dlvsym -#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS +#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS 
&& !SANITIZER_OPENBSD void *GetFuncAddrVer(const char *func_name, const char *ver) { return dlvsym(RTLD_NEXT, func_name, ver); } @@ -52,4 +52,4 @@ void *GetFuncAddrVer(const char *func_name, const char *ver) { } // namespace __interception #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || - // SANITIZER_SOLARIS + // SANITIZER_OPENBSD || SANITIZER_SOLARIS diff --git a/lib/interception/interception_linux.h b/lib/interception/interception_linux.h index 98fe51b858fc..942c25609ccb 100644 --- a/lib/interception/interception_linux.h +++ b/lib/interception/interception_linux.h @@ -13,7 +13,7 @@ //===----------------------------------------------------------------------===// #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_OPENBSD || SANITIZER_SOLARIS #if !defined(INCLUDED_FROM_INTERCEPTION_LIB) # error "interception_linux.h should be included from interception library only" @@ -35,8 +35,8 @@ void *GetFuncAddrVer(const char *func_name, const char *ver); (::__interception::uptr) & (func), \ (::__interception::uptr) & WRAP(func)) -// Android and Solaris do not have dlvsym -#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS +// Android, Solaris and OpenBSD do not have dlvsym +#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD #define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ (::__interception::real_##func = (func##_f)( \ unsigned long)::__interception::GetFuncAddrVer(#func, symver)) @@ -47,4 +47,4 @@ void *GetFuncAddrVer(const char *func_name, const char *ver); #endif // INTERCEPTION_LINUX_H #endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || - // SANITIZER_SOLARIS + // SANITIZER_OPENBSD || SANITIZER_SOLARIS diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc index dc3fe35242ef..bd4ad7274dde 100644 --- a/lib/interception/interception_win.cc +++ b/lib/interception/interception_win.cc @@ -453,6 +453,7 @@ static size_t 
GetInstructionSize(uptr address, size_t* rel_offset = nullptr) { } switch (*(u16*)(address)) { + case 0x018A: // 8A 01 : mov al, byte ptr [ecx] case 0xFF8B: // 8B FF : mov edi, edi case 0xEC8B: // 8B EC : mov ebp, esp case 0xc889: // 89 C8 : mov eax, ecx diff --git a/lib/lsan/.clang-format b/lib/lsan/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/lsan/.clang-format +++ b/lib/lsan/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt index 60da3e186871..34f686135ac4 100644 --- a/lib/lsan/CMakeLists.txt +++ b/lib/lsan/CMakeLists.txt @@ -18,12 +18,20 @@ set(LSAN_SOURCES lsan_preinit.cc lsan_thread.cc) +set(LSAN_HEADERS + lsan.h + lsan_allocator.h + lsan_common.h + lsan_flags.inc + lsan_thread.h) + set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}) add_compiler_rt_object_libraries(RTLSanCommon OS ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${LSAN_COMMON_SUPPORTED_ARCH} SOURCES ${LSAN_COMMON_SOURCES} + ADDITIONAL_HEADERS ${LSAN_HEADERS} CFLAGS ${LSAN_CFLAGS}) if(COMPILER_RT_HAS_LSAN) @@ -39,10 +47,13 @@ if(COMPILER_RT_HAS_LSAN) OS ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${LSAN_SUPPORTED_ARCH} SOURCES ${LSAN_SOURCES} + ADDITIONAL_HEADERS ${LSAN_HEADERS} OBJECT_LIBS RTLSanCommon RTInterception RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer CFLAGS ${LSAN_CFLAGS} LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} LINK_LIBS ${LSAN_LINK_LIBS} @@ -56,7 +67,10 @@ if(COMPILER_RT_HAS_LSAN) $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonCoverage.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}> $<TARGET_OBJECTS:RTLSanCommon.${arch}> + ADDITIONAL_HEADERS ${LSAN_HEADERS} CFLAGS ${LSAN_CFLAGS} PARENT_TARGET lsan) endforeach() diff --git a/lib/lsan/lsan.cc b/lib/lsan/lsan.cc 
index a9f7e399e14c..93bced0459c2 100644 --- a/lib/lsan/lsan.cc +++ b/lib/lsan/lsan.cc @@ -66,6 +66,8 @@ static void InitializeFlags() { if (Verbosity()) ReportUnrecognizedFlags(); if (common_flags()->help) parser.PrintFlagDescriptions(); + + __sanitizer_set_report_path(common_flags()->log_path); } static void OnStackUnwind(const SignalContext &sig, const void *, diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc index 2df58b44f6b8..c58c3548002f 100644 --- a/lib/lsan/lsan_allocator.cc +++ b/lib/lsan/lsan_allocator.cc @@ -17,6 +17,7 @@ #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_stackdepot.h" @@ -70,15 +71,27 @@ static void RegisterDeallocation(void *p) { atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed); } +static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) { + if (AllocatorMayReturnNull()) { + Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size); + return nullptr; + } + ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack); +} + void *Allocate(const StackTrace &stack, uptr size, uptr alignment, bool cleared) { if (size == 0) size = 1; - if (size > kMaxAllowedMallocSize) { - Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); - return Allocator::FailureHandler::OnBadRequest(); - } + if (size > kMaxAllowedMallocSize) + return ReportAllocationSizeTooBig(size, stack); void *p = allocator.Allocate(GetAllocatorCache(), size, alignment); + if (UNLIKELY(!p)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, &stack); + } // Do not rely on the allocator to clear the memory (it's slow). 
if (cleared && allocator.FromPrimary(p)) memset(p, 0, size); @@ -89,8 +102,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, } static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) - return Allocator::FailureHandler::OnBadRequest(); + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, &stack); + } size *= nmemb; return Allocate(stack, size, 1, true); } @@ -106,9 +122,8 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size, uptr alignment) { RegisterDeallocation(p); if (new_size > kMaxAllowedMallocSize) { - Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size); allocator.Deallocate(GetAllocatorCache(), p); - return Allocator::FailureHandler::OnBadRequest(); + return ReportAllocationSizeTooBig(new_size, stack); } p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment); RegisterAllocation(stack, p, new_size); @@ -126,10 +141,38 @@ uptr GetMallocUsableSize(const void *p) { return m->requested_size; } +int lsan_posix_memalign(void **memptr, uptr alignment, uptr size, + const StackTrace &stack) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, &stack); + } + void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory); + if (UNLIKELY(!ptr)) + // OOM error is already taken care of by Allocate. 
+ return errno_ENOMEM; + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, &stack); + } + return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); +} + void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) { if (UNLIKELY(!IsPowerOfTwo(alignment))) { errno = errno_EINVAL; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, &stack); } return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory)); } @@ -155,6 +198,19 @@ void *lsan_valloc(uptr size, const StackTrace &stack) { Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory)); } +void *lsan_pvalloc(uptr size, const StackTrace &stack) { + uptr PageSize = GetPageSizeCached(); + if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + ReportPvallocOverflow(size, &stack); + } + // pvalloc(0) should allocate one page. + size = size ? 
RoundUpTo(size, PageSize) : PageSize; + return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory)); +} + uptr lsan_mz_size(const void *p) { return GetMallocUsableSize(p); } diff --git a/lib/lsan/lsan_allocator.h b/lib/lsan/lsan_allocator.h index 4006f7929269..7c70bb6d9766 100644 --- a/lib/lsan/lsan_allocator.h +++ b/lib/lsan/lsan_allocator.h @@ -90,12 +90,16 @@ typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; AllocatorCache *GetAllocatorCache(); +int lsan_posix_memalign(void **memptr, uptr alignment, uptr size, + const StackTrace &stack); +void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack); void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack); void *lsan_malloc(uptr size, const StackTrace &stack); void lsan_free(void *p); void *lsan_realloc(void *p, uptr size, const StackTrace &stack); void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack); void *lsan_valloc(uptr size, const StackTrace &stack); +void *lsan_pvalloc(uptr size, const StackTrace &stack); uptr lsan_mz_size(const void *p); } // namespace __lsan diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc index 69ffda539a26..012a673c3b25 100644 --- a/lib/lsan/lsan_common.cc +++ b/lib/lsan/lsan_common.cc @@ -15,14 +15,15 @@ #include "lsan_common.h" #include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_procmaps.h" +#include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_suppressions.h" -#include "sanitizer_common/sanitizer_report_decorator.h" +#include "sanitizer_common/sanitizer_thread_registry.h" #include "sanitizer_common/sanitizer_tls_get_addr.h" 
#if CAN_SANITIZE_LEAKS @@ -99,12 +100,14 @@ static SuppressionContext *GetSuppressionContext() { static InternalMmapVector<RootRegion> *root_regions; +static uptr initialized_for_pid; + InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; } void InitializeRootRegions() { CHECK(!root_regions); ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)]; - root_regions = new(placeholder) InternalMmapVector<RootRegion>(1); + root_regions = new (placeholder) InternalMmapVector<RootRegion>(); // NOLINT } const char *MaybeCallLsanDefaultOptions() { @@ -112,6 +115,7 @@ const char *MaybeCallLsanDefaultOptions() { } void InitCommonLsan() { + initialized_for_pid = internal_getpid(); InitializeRootRegions(); if (common_flags()->detect_leaks) { // Initialization which can fail or print warnings should only be done if @@ -214,9 +218,10 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) { // Scans thread data (stacks and TLS) for heap pointers. static void ProcessThreads(SuspendedThreadsList const &suspended_threads, Frontier *frontier) { - InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount()); + InternalMmapVector<uptr> registers(suspended_threads.RegisterCount()); uptr registers_begin = reinterpret_cast<uptr>(registers.data()); - uptr registers_end = registers_begin + registers.size(); + uptr registers_end = + reinterpret_cast<uptr>(registers.data() + registers.size()); for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i)); LOG_THREADS("Processing thread %d.\n", os_id); @@ -444,7 +449,7 @@ void ProcessPC(Frontier *frontier) { // Sets the appropriate tag on each chunk. static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { // Holds the flood fill frontier. 
- Frontier frontier(1); + Frontier frontier; ForEachChunk(CollectIgnoredCb, &frontier); ProcessGlobalRegions(&frontier); @@ -506,7 +511,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) { } static void PrintMatchedSuppressions() { - InternalMmapVector<Suppression *> matched(1); + InternalMmapVector<Suppression *> matched; GetSuppressionContext()->GetMatched(&matched); if (!matched.size()) return; @@ -525,11 +530,36 @@ struct CheckForLeaksParam { LeakReport leak_report; }; +static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) { + const InternalMmapVector<tid_t> &suspended_threads = + *(const InternalMmapVector<tid_t> *)arg; + if (tctx->status == ThreadStatusRunning) { + uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(), + tctx->os_id, CompareLess<int>()); + if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id) + Report("Running thread %d was not suspended. False leaks are possible.\n", + tctx->os_id); + }; +} + +static void ReportUnsuspendedThreads( + const SuspendedThreadsList &suspended_threads) { + InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount()); + for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i) + threads[i] = suspended_threads.GetThreadID(i); + + Sort(threads.data(), threads.size()); + + GetThreadRegistryLocked()->RunCallbackForEachThreadLocked( + &ReportIfNotSuspended, &threads); +} + static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, void *arg) { CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg); CHECK(param); CHECK(!param->success); + ReportUnsuspendedThreads(suspended_threads); ClassifyAllChunks(suspended_threads); ForEachChunk(CollectLeaksCb, ¶m->leak_report); // Clean up for subsequent leak checks. 
This assumes we did not overwrite any @@ -541,6 +571,12 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, static bool CheckForLeaks() { if (&__lsan_is_turned_off && __lsan_is_turned_off()) return false; + if (initialized_for_pid != internal_getpid()) { + // If process was forked and it had threads we fail to detect references + // from other threads. + Report("WARNING: LeakSanitizer is disabled in forked process.\n"); + return false; + } EnsureMainThreadIDIsCorrect(); CheckForLeaksParam param; param.success = false; @@ -684,7 +720,7 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) { uptr unsuppressed_count = UnsuppressedLeakCount(); if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count) Printf("The %zu top leak(s):\n", num_leaks_to_report); - InternalSort(&leaks_, leaks_.size(), LeakComparator); + Sort(leaks_.data(), leaks_.size(), &LeakComparator); uptr leaks_reported = 0; for (uptr i = 0; i < leaks_.size(); i++) { if (leaks_[i].is_suppressed) continue; diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h index f3863309d893..1d1e1e462435 100644 --- a/lib/lsan/lsan_common.h +++ b/lib/lsan/lsan_common.h @@ -47,6 +47,7 @@ namespace __sanitizer { class FlagParser; +class ThreadRegistry; struct DTLS; } @@ -95,7 +96,7 @@ struct LeakedObject { // Aggregates leaks by stack trace prefix. 
class LeakReport { public: - LeakReport() : next_id_(0), leaks_(1), leaked_objects_(1) {} + LeakReport() {} void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size, ChunkTag tag); void ReportTopLeaks(uptr max_leaks); @@ -103,12 +104,11 @@ class LeakReport { void ApplySuppressions(); uptr UnsuppressedLeakCount(); - private: void PrintReportForLeak(uptr index); void PrintLeakedObjectsForLeak(uptr index); - u32 next_id_; + u32 next_id_ = 0; InternalMmapVector<Leak> leaks_; InternalMmapVector<LeakedObject> leaked_objects_; }; @@ -205,6 +205,7 @@ bool WordIsPoisoned(uptr addr); // Wrappers for ThreadRegistry access. void LockThreadRegistry(); void UnlockThreadRegistry(); +ThreadRegistry *GetThreadRegistryLocked(); bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end, uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls); diff --git a/lib/lsan/lsan_common_mac.cc b/lib/lsan/lsan_common_mac.cc index ac27c7af6e35..2508c1dbd873 100644 --- a/lib/lsan/lsan_common_mac.cc +++ b/lib/lsan/lsan_common_mac.cc @@ -24,6 +24,13 @@ #include <mach/mach.h> +// Only introduced in Mac OS X 10.9. 
+#ifdef VM_MEMORY_OS_ALLOC_ONCE +static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE; +#else +static const int kSanitizerVmMemoryOsAllocOnce = 73; +#endif + namespace __lsan { typedef struct { @@ -112,7 +119,8 @@ void ProcessGlobalRegions(Frontier *frontier) { for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName); MemoryMappingLayout memory_mapping(false); - InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); + InternalMmapVector<LoadedModule> modules; + modules.reserve(128); memory_mapping.DumpListOfModules(&modules); for (uptr i = 0; i < modules.size(); ++i) { // Even when global scanning is disabled, we still need to scan @@ -157,7 +165,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { // libxpc stashes some pointers in the Kernel Alloc Once page, // make sure not to report those as leaks. - if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) { + if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) { ScanRangeForPointers(address, end_address, frontier, "GLOBAL", kReachable); diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc index b3e73e3896d5..fde52e496164 100644 --- a/lib/lsan/lsan_interceptors.cc +++ b/lib/lsan/lsan_interceptors.cc @@ -14,6 +14,7 @@ #include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" @@ -86,9 +87,7 @@ INTERCEPTOR(void*, realloc, void *q, uptr size) { INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; - *memptr = lsan_memalign(alignment, size, stack); - // FIXME: Return ENOMEM if user requested more than max alloc size. 
- return 0; + return lsan_posix_memalign(memptr, alignment, size, stack); } INTERCEPTOR(void*, valloc, uptr size) { @@ -123,7 +122,7 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) { INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; - return lsan_memalign(alignment, size, stack); + return lsan_aligned_alloc(alignment, size, stack); } #define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc) #else @@ -166,13 +165,7 @@ INTERCEPTOR(int, mallopt, int cmd, int value) { INTERCEPTOR(void*, pvalloc, uptr size) { ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; - uptr PageSize = GetPageSizeCached(); - size = RoundUpTo(size, PageSize); - if (size == 0) { - // pvalloc(0) should allocate one page. - size = PageSize; - } - return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory); + return lsan_pvalloc(size, stack); } #define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc) #else @@ -202,21 +195,21 @@ INTERCEPTOR(int, mprobe, void *ptr) { // TODO(alekseys): throw std::bad_alloc instead of dying on OOM. 
-#define OPERATOR_NEW_BODY(nothrow) \ - ENSURE_LSAN_INITED; \ - GET_STACK_TRACE_MALLOC; \ - void *res = lsan_malloc(size, stack); \ - if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \ +#define OPERATOR_NEW_BODY(nothrow)\ + ENSURE_LSAN_INITED;\ + GET_STACK_TRACE_MALLOC;\ + void *res = lsan_malloc(size, stack);\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ return res; -#define OPERATOR_NEW_BODY_ALIGN(nothrow) \ - ENSURE_LSAN_INITED; \ - GET_STACK_TRACE_MALLOC; \ - void *res = lsan_memalign((uptr)align, size, stack); \ - if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \ +#define OPERATOR_NEW_BODY_ALIGN(nothrow)\ + ENSURE_LSAN_INITED;\ + GET_STACK_TRACE_MALLOC;\ + void *res = lsan_memalign((uptr)align, size, stack);\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ return res; -#define OPERATOR_DELETE_BODY \ - ENSURE_LSAN_INITED; \ +#define OPERATOR_DELETE_BODY\ + ENSURE_LSAN_INITED;\ lsan_free(ptr); // On OS X it's not enough to just provide our own 'operator new' and @@ -309,7 +302,7 @@ INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) ///// Thread initialization and finalization. ///// -#if !SANITIZER_NETBSD +#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD static unsigned g_thread_finalize_key; static void thread_finalize(void *v) { @@ -336,6 +329,17 @@ INTERCEPTOR(void, _lwp_exit) { #define LSAN_MAYBE_INTERCEPT__LWP_EXIT #endif +#if SANITIZER_INTERCEPT_THR_EXIT +INTERCEPTOR(void, thr_exit, tid_t *state) { + ENSURE_LSAN_INITED; + ThreadFinish(); + REAL(thr_exit)(state); +} +#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit) +#else +#define LSAN_MAYBE_INTERCEPT_THR_EXIT +#endif + struct ThreadParam { void *(*callback)(void *arg); void *param; @@ -348,7 +352,7 @@ extern "C" void *__lsan_thread_start_func(void *arg) { void *param = p->param; // Wait until the last iteration to maximize the chance that we are the last // destructor to run. 
-#if !SANITIZER_NETBSD +#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD if (pthread_setspecific(g_thread_finalize_key, (void*)GetPthreadDestructorIterations())) { Report("LeakSanitizer: failed to set thread key.\n"); @@ -443,8 +447,9 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(_exit); LSAN_MAYBE_INTERCEPT__LWP_EXIT; + LSAN_MAYBE_INTERCEPT_THR_EXIT; -#if !SANITIZER_NETBSD +#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) { Report("LeakSanitizer: failed to create thread key.\n"); Die(); diff --git a/lib/lsan/lsan_malloc_mac.cc b/lib/lsan/lsan_malloc_mac.cc index 9c1dacc055bd..94ffb6d02539 100644 --- a/lib/lsan/lsan_malloc_mac.cc +++ b/lib/lsan/lsan_malloc_mac.cc @@ -37,6 +37,9 @@ using namespace __lsan; #define COMMON_MALLOC_CALLOC(count, size) \ GET_STACK_TRACE_MALLOC; \ void *p = lsan_calloc(count, size, stack) +#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ + GET_STACK_TRACE_MALLOC; \ + int res = lsan_posix_memalign(memptr, alignment, size, stack) #define COMMON_MALLOC_VALLOC(size) \ GET_STACK_TRACE_MALLOC; \ void *p = lsan_valloc(size, stack) diff --git a/lib/lsan/lsan_thread.cc b/lib/lsan/lsan_thread.cc index 4404c8cc51d2..a25aff379961 100644 --- a/lib/lsan/lsan_thread.cc +++ b/lib/lsan/lsan_thread.cc @@ -155,4 +155,9 @@ void UnlockThreadRegistry() { thread_registry->Unlock(); } +ThreadRegistry *GetThreadRegistryLocked() { + thread_registry->CheckLocked(); + return thread_registry; +} + } // namespace __lsan diff --git a/lib/msan/.clang-format b/lib/msan/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/msan/.clang-format +++ b/lib/msan/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt index 598ae54588c1..15cc513c20e9 100644 --- a/lib/msan/CMakeLists.txt +++ b/lib/msan/CMakeLists.txt @@ -15,10 +15,26 @@ set(MSAN_RTL_SOURCES set(MSAN_RTL_CXX_SOURCES 
msan_new_delete.cc) +set(MSAN_RTL_HEADERS + msan.h + msan_allocator.h + msan_chained_origin_depot.h + msan_flags.h + msan_flags.inc + msan_interface_internal.h + msan_origin.h + msan_poisoning.h + msan_report.h + msan_thread.h) set(MSAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS}) +if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD") + append_list_if(COMPILER_RT_HAS_FTLS_MODEL_INITIAL_EXEC -ftls-model=initial-exec MSAN_RTL_CFLAGS) +endif() append_rtti_flag(OFF MSAN_RTL_CFLAGS) -append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE MSAN_RTL_CFLAGS) +if(NOT CMAKE_SYSTEM_NAME MATCHES "FreeBSD") + append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE MSAN_RTL_CFLAGS) +endif() # Prevent clang from generating libc calls. append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding MSAN_RTL_CFLAGS) @@ -35,7 +51,10 @@ foreach(arch ${MSAN_SUPPORTED_ARCH}) $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonCoverage.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}> $<TARGET_OBJECTS:RTUbsan.${arch}> + ADDITIONAL_HEADERS ${MSAN_RTL_HEADERS} CFLAGS ${MSAN_RTL_CFLAGS} PARENT_TARGET msan) add_compiler_rt_runtime(clang_rt.msan_cxx @@ -43,11 +62,12 @@ foreach(arch ${MSAN_SUPPORTED_ARCH}) ARCHS ${arch} SOURCES ${MSAN_RTL_CXX_SOURCES} $<TARGET_OBJECTS:RTUbsan_cxx.${arch}> + ADDITIONAL_HEADERS ${MSAN_RTL_HEADERS} CFLAGS ${MSAN_RTL_CFLAGS} PARENT_TARGET msan) list(APPEND MSAN_RUNTIME_LIBRARIES clang_rt.msan-${arch} clang_rt.msan_cxx-${arch}) - if(UNIX) + if(SANITIZER_USE_SYMBOLS) add_sanitizer_rt_symbols(clang_rt.msan ARCHS ${arch} EXTRA msan.syms.extra) diff --git a/lib/msan/msan.cc b/lib/msan/msan.cc index e6226ba7670f..06bcbdf88691 100644 --- a/lib/msan/msan.cc +++ b/lib/msan/msan.cc @@ -15,6 +15,7 @@ #include "msan.h" #include "msan_chained_origin_depot.h" #include "msan_origin.h" +#include "msan_report.h" #include "msan_thread.h" #include "msan_poisoning.h" #include 
"sanitizer_common/sanitizer_atomic.h" @@ -379,6 +380,14 @@ static void MsanOnDeadlySignal(int signo, void *siginfo, void *context) { HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr); } +static void MsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2) { + Report("MemorySanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, + line, cond, (uptr)v1, (uptr)v2); + PRINT_CURRENT_STACK_CHECK(); + Die(); +} + void __msan_init() { CHECK(!msan_init_is_running); if (msan_inited) return; @@ -386,14 +395,18 @@ void __msan_init() { SanitizerToolName = "MemorySanitizer"; AvoidCVE_2016_2143(); - InitTlsSize(); CacheBinaryName(); + CheckASLR(); InitializeFlags(); + // Install tool-specific callbacks in sanitizer_common. + SetCheckFailedCallback(MsanCheckFailed); + __sanitizer_set_report_path(common_flags()->log_path); InitializeInterceptors(); + InitTlsSize(); InstallDeadlySignalHandlers(MsanOnDeadlySignal); InstallAtExitHandler(); // Needs __cxa_atexit interceptor. diff --git a/lib/msan/msan.h b/lib/msan/msan.h index cbae444127ee..4c2e9f9e24a0 100644 --- a/lib/msan/msan.h +++ b/lib/msan/msan.h @@ -316,14 +316,6 @@ void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin); void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp, void *context, bool request_fast_unwind); -void ReportUMR(StackTrace *stack, u32 origin); -void ReportExpectedUMRNotFound(StackTrace *stack); -void ReportStats(); -void ReportAtExitStatistics(); -void DescribeMemoryRange(const void *x, uptr size); -void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size, - uptr offset); - // Unpoison first n function arguments. 
void UnpoisonParam(uptr n); void UnpoisonThreadLocalState(); @@ -356,14 +348,23 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1; common_flags()->fast_unwind_on_malloc); \ } +#define GET_STORE_STACK_TRACE \ + GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) + #define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \ BufferedStackTrace stack; \ if (msan_inited) \ GetStackTrace(&stack, kStackTraceMax, pc, bp, nullptr, \ common_flags()->fast_unwind_on_fatal) -#define GET_STORE_STACK_TRACE \ - GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) +#define GET_FATAL_STACK_TRACE_HERE \ + GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) + +#define PRINT_CURRENT_STACK_CHECK() \ + { \ + GET_FATAL_STACK_TRACE_HERE; \ + stack.Print(); \ + } class ScopedThreadLocalStateBackup { public: diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc index 0f9942324931..36f0497a9d83 100644 --- a/lib/msan/msan_allocator.cc +++ b/lib/msan/msan_allocator.cc @@ -15,6 +15,7 @@ #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #include "sanitizer_common/sanitizer_errno.h" #include "msan.h" #include "msan_allocator.h" @@ -119,7 +120,7 @@ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, static Allocator allocator; static AllocatorCache fallback_allocator_cache; -static SpinMutex fallback_mutex; +static StaticSpinMutex fallback_mutex; void MsanAllocatorInit() { SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); @@ -139,9 +140,11 @@ void MsanThreadLocalMallocStorage::CommitBack() { static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment, bool zeroise) { if (size > kMaxAllowedMallocSize) { - Report("WARNING: MemorySanitizer failed to allocate %p bytes\n", - (void *)size); - return 
Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) { + Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size); + return nullptr; + } + ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack); } MsanThread *t = GetCurrentThread(); void *allocated; @@ -153,6 +156,12 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment, AllocatorCache *cache = &fallback_allocator_cache; allocated = allocator.Allocate(cache, size, alignment); } + if (UNLIKELY(!allocated)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, stack); + } Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated)); meta->requested_size = size; @@ -222,6 +231,15 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size, return new_p; } +void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, stack); + } + return MsanAllocate(stack, nmemb * size, sizeof(u64), true); +} + static uptr AllocationSize(const void *p) { if (!p) return 0; const void *beg = allocator.GetBlockBegin(p); @@ -235,9 +253,7 @@ void *msan_malloc(uptr size, StackTrace *stack) { } void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) - return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest()); - return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true)); + return SetErrnoOnNull(MsanCalloc(stack, nmemb, size)); } void *msan_realloc(void *ptr, uptr size, StackTrace *stack) { @@ -258,7 +274,9 @@ void *msan_pvalloc(uptr size, StackTrace *stack) { uptr PageSize = GetPageSizeCached(); if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { errno = errno_ENOMEM; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + 
ReportPvallocOverflow(size, stack); } // pvalloc(0) should allocate one page. size = size ? RoundUpTo(size, PageSize) : PageSize; @@ -268,7 +286,9 @@ void *msan_pvalloc(uptr size, StackTrace *stack) { void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) { if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { errno = errno_EINVAL; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, stack); } return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false)); } @@ -276,7 +296,9 @@ void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) { void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) { if (UNLIKELY(!IsPowerOfTwo(alignment))) { errno = errno_EINVAL; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, stack); } return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false)); } @@ -284,11 +306,13 @@ void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) { int msan_posix_memalign(void **memptr, uptr alignment, uptr size, StackTrace *stack) { if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { - Allocator::FailureHandler::OnBadRequest(); - return errno_EINVAL; + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, stack); } void *ptr = MsanAllocate(stack, size, alignment, false); if (UNLIKELY(!ptr)) + // OOM error is already taken care of by MsanAllocate. 
return errno_ENOMEM; CHECK(IsAligned((uptr)ptr, alignment)); *memptr = ptr; diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc index a7fe09b25ffb..b3429bcf06b5 100644 --- a/lib/msan/msan_interceptors.cc +++ b/lib/msan/msan_interceptors.cc @@ -19,6 +19,7 @@ #include "msan.h" #include "msan_chained_origin_depot.h" #include "msan_origin.h" +#include "msan_report.h" #include "msan_thread.h" #include "msan_poisoning.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" @@ -35,6 +36,7 @@ #include "sanitizer_common/sanitizer_tls_get_addr.h" #if SANITIZER_NETBSD +#define fstat __fstat50 #define gettimeofday __gettimeofday50 #define getrusage __getrusage50 #endif @@ -58,6 +60,9 @@ DECLARE_REAL(void *, memset, void *dest, int c, uptr n) // True if this is a nested interceptor. static THREADLOCAL int in_interceptor_scope; +void __msan_scoped_disable_interceptor_checks() { ++in_interceptor_scope; } +void __msan_scoped_enable_interceptor_checks() { --in_interceptor_scope; } + struct InterceptorScope { InterceptorScope() { ++in_interceptor_scope; } ~InterceptorScope() { --in_interceptor_scope; } @@ -137,15 +142,6 @@ INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb, #define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED #endif -INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) { - ENSURE_MSAN_INITED(); - CHECK_UNPOISONED_STRING(path, 0); - SSIZE_T res = REAL(readlink)(path, buf, bufsiz); - if (res > 0) - __msan_unpoison(buf, res); - return res; -} - #if !SANITIZER_NETBSD INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) { return (char *)__msan_memcpy(dest, src, n) + n; @@ -489,39 +485,9 @@ INTERCEPTOR(int, swprintf, void *str, uptr size, void *format, ...) 
{ return res; } -INTERCEPTOR(SIZE_T, strxfrm, char *dest, const char *src, SIZE_T n) { - ENSURE_MSAN_INITED(); - CHECK_UNPOISONED(src, REAL(strlen)(src) + 1); - SIZE_T res = REAL(strxfrm)(dest, src, n); - if (res < n) __msan_unpoison(dest, res + 1); - return res; -} - -INTERCEPTOR(SIZE_T, strxfrm_l, char *dest, const char *src, SIZE_T n, - void *loc) { - ENSURE_MSAN_INITED(); - CHECK_UNPOISONED(src, REAL(strlen)(src) + 1); - SIZE_T res = REAL(strxfrm_l)(dest, src, n, loc); - if (res < n) __msan_unpoison(dest, res + 1); - return res; -} - -#if SANITIZER_LINUX -INTERCEPTOR(SIZE_T, __strxfrm_l, char *dest, const char *src, SIZE_T n, - void *loc) { - ENSURE_MSAN_INITED(); - CHECK_UNPOISONED(src, REAL(strlen)(src) + 1); - SIZE_T res = REAL(__strxfrm_l)(dest, src, n, loc); - if (res < n) __msan_unpoison(dest, res + 1); - return res; -} -#define MSAN_MAYBE_INTERCEPT___STRXFRM_L INTERCEPT_FUNCTION(__strxfrm_l) -#else -#define MSAN_MAYBE_INTERCEPT___STRXFRM_L -#endif - #define INTERCEPTOR_STRFTIME_BODY(char_type, ret_type, func, s, ...) 
\ ENSURE_MSAN_INITED(); \ + InterceptorScope interceptor_scope; \ ret_type res = REAL(func)(s, __VA_ARGS__); \ if (s) __msan_unpoison(s, sizeof(char_type) * (res + 1)); \ return res; @@ -688,6 +654,19 @@ INTERCEPTOR(int, putenv, char *string) { return res; } +#if SANITIZER_FREEBSD || SANITIZER_NETBSD +INTERCEPTOR(int, fstat, int fd, void *buf) { + ENSURE_MSAN_INITED(); + int res = REAL(fstat)(fd, buf); + if (!res) + __msan_unpoison(buf, __sanitizer::struct_stat_sz); + return res; +} +#define MSAN_MAYBE_INTERCEPT_FSTAT INTERCEPT_FUNCTION(fstat) +#else +#define MSAN_MAYBE_INTERCEPT_FSTAT +#endif + #if !SANITIZER_FREEBSD && !SANITIZER_NETBSD INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) { ENSURE_MSAN_INITED(); @@ -772,14 +751,6 @@ INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int sv[2]) { return res; } -INTERCEPTOR(char *, fgets, char *s, int size, void *stream) { - ENSURE_MSAN_INITED(); - char *res = REAL(fgets)(s, size, stream); - if (res) - __msan_unpoison(s, REAL(strlen)(s) + 1); - return res; -} - #if !SANITIZER_FREEBSD && !SANITIZER_NETBSD INTERCEPTOR(char *, fgets_unlocked, char *s, int size, void *stream) { ENSURE_MSAN_INITED(); @@ -964,11 +935,9 @@ void __sanitizer_dtor_callback(const void *data, uptr size) { } } -INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags, - int fd, OFF_T offset) { - if (msan_init_is_running) - return REAL(mmap)(addr, length, prot, flags, fd, offset); - ENSURE_MSAN_INITED(); +template <class Mmap> +static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length, + int prot, int flags, int fd, OFF64_T offset) { if (addr && !MEM_IS_APP(addr)) { if (flags & map_fixed) { errno = errno_EINVAL; @@ -977,34 +946,11 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags, addr = nullptr; } } - void *res = REAL(mmap)(addr, length, prot, flags, fd, offset); - if (res != (void*)-1) - __msan_unpoison(res, RoundUpTo(length, GetPageSize())); + void *res = 
real_mmap(addr, length, prot, flags, fd, offset); + if (res != (void *)-1) __msan_unpoison(res, RoundUpTo(length, GetPageSize())); return res; } -#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD -INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags, - int fd, OFF64_T offset) { - ENSURE_MSAN_INITED(); - if (addr && !MEM_IS_APP(addr)) { - if (flags & map_fixed) { - errno = errno_EINVAL; - return (void *)-1; - } else { - addr = nullptr; - } - } - void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset); - if (res != (void*)-1) - __msan_unpoison(res, RoundUpTo(length, GetPageSize())); - return res; -} -#define MSAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64) -#else -#define MSAN_MAYBE_INTERCEPT_MMAP64 -#endif - INTERCEPTOR(int, getrusage, int who, void *usage) { ENSURE_MSAN_INITED(); int res = REAL(getrusage)(who, usage); @@ -1175,6 +1121,9 @@ INTERCEPTOR(int, fork, void) { return pid; } +// NetBSD ships with openpty(3) in -lutil, that needs to be prebuilt explicitly +// with MSan. +#if SANITIZER_LINUX INTERCEPTOR(int, openpty, int *amaster, int *aslave, char *name, const void *termp, const void *winp) { ENSURE_MSAN_INITED(); @@ -1186,7 +1135,14 @@ INTERCEPTOR(int, openpty, int *amaster, int *aslave, char *name, } return res; } +#define MSAN_MAYBE_INTERCEPT_OPENPTY INTERCEPT_FUNCTION(openpty) +#else +#define MSAN_MAYBE_INTERCEPT_OPENPTY +#endif +// NetBSD ships with forkpty(3) in -lutil, that needs to be prebuilt explicitly +// with MSan. 
+#if SANITIZER_LINUX INTERCEPTOR(int, forkpty, int *amaster, char *name, const void *termp, const void *winp) { ENSURE_MSAN_INITED(); @@ -1196,6 +1152,10 @@ INTERCEPTOR(int, forkpty, int *amaster, char *name, const void *termp, __msan_unpoison(amaster, sizeof(*amaster)); return res; } +#define MSAN_MAYBE_INTERCEPT_FORKPTY INTERCEPT_FUNCTION(forkpty) +#else +#define MSAN_MAYBE_INTERCEPT_FORKPTY +#endif struct MSanInterceptorContext { bool in_interceptor_scope; @@ -1308,6 +1268,12 @@ int OnExit() { __msan_unpoison(to + size, 1); \ } while (false) +#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, fd, \ + offset) \ + do { \ + return mmap_interceptor(REAL(mmap), addr, sz, prot, flags, fd, off); \ + } while (false) + #include "sanitizer_common/sanitizer_platform_interceptors.h" #include "sanitizer_common/sanitizer_common_interceptors.inc" @@ -1321,6 +1287,7 @@ static int sigaction_impl(int signo, const __sanitizer_sigaction *act, #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \ { \ handler = signal_impl(signo, handler); \ + InterceptorScope interceptor_scope; \ return REAL(func)(signo, handler); \ } @@ -1387,6 +1354,7 @@ static uptr signal_impl(int signo, uptr cb) { } while (false) #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s) #include "sanitizer_common/sanitizer_common_syscalls.inc" +#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" struct dlinfo { char *dli_fname; @@ -1555,8 +1523,6 @@ void InitializeInterceptors() { InitializeCommonInterceptors(); InitializeSignalInterceptors(); - INTERCEPT_FUNCTION(mmap); - MSAN_MAYBE_INTERCEPT_MMAP64; INTERCEPT_FUNCTION(posix_memalign); MSAN_MAYBE_INTERCEPT_MEMALIGN; MSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN; @@ -1573,7 +1539,6 @@ void InitializeInterceptors() { MSAN_MAYBE_INTERCEPT_MALLOC_STATS; INTERCEPT_FUNCTION(fread); MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED; - INTERCEPT_FUNCTION(readlink); INTERCEPT_FUNCTION(memccpy); MSAN_MAYBE_INTERCEPT_MEMPCPY; 
INTERCEPT_FUNCTION(bcopy); @@ -1611,9 +1576,6 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(vswprintf); INTERCEPT_FUNCTION(swprintf); #endif - INTERCEPT_FUNCTION(strxfrm); - INTERCEPT_FUNCTION(strxfrm_l); - MSAN_MAYBE_INTERCEPT___STRXFRM_L; INTERCEPT_FUNCTION(strftime); INTERCEPT_FUNCTION(strftime_l); MSAN_MAYBE_INTERCEPT___STRFTIME_L; @@ -1633,6 +1595,7 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(putenv); INTERCEPT_FUNCTION(gettimeofday); MSAN_MAYBE_INTERCEPT_FCVT; + MSAN_MAYBE_INTERCEPT_FSTAT; MSAN_MAYBE_INTERCEPT___FXSTAT; MSAN_INTERCEPT_FSTATAT; MSAN_MAYBE_INTERCEPT___FXSTAT64; @@ -1640,7 +1603,6 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(pipe); INTERCEPT_FUNCTION(pipe2); INTERCEPT_FUNCTION(socketpair); - INTERCEPT_FUNCTION(fgets); MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED; INTERCEPT_FUNCTION(getrlimit); MSAN_MAYBE_INTERCEPT_GETRLIMIT64; @@ -1670,8 +1632,8 @@ void InitializeInterceptors() { INTERCEPT_FUNCTION(__cxa_atexit); INTERCEPT_FUNCTION(shmat); INTERCEPT_FUNCTION(fork); - INTERCEPT_FUNCTION(openpty); - INTERCEPT_FUNCTION(forkpty); + MSAN_MAYBE_INTERCEPT_OPENPTY; + MSAN_MAYBE_INTERCEPT_FORKPTY; inited = 1; } diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h index c6990db243c1..9a67cbc9b5f8 100644 --- a/lib/msan/msan_interface_internal.h +++ b/lib/msan/msan_interface_internal.h @@ -174,6 +174,12 @@ void __msan_set_death_callback(void (*callback)(void)); SANITIZER_INTERFACE_ATTRIBUTE void __msan_copy_shadow(void *dst, const void *src, uptr size); + +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_scoped_disable_interceptor_checks(); + +SANITIZER_INTERFACE_ATTRIBUTE +void __msan_scoped_enable_interceptor_checks(); } // extern "C" #endif // MSAN_INTERFACE_INTERNAL_H diff --git a/lib/msan/msan_linux.cc b/lib/msan/msan_linux.cc index 4e6321fcb918..385a650c4afc 100644 --- a/lib/msan/msan_linux.cc +++ b/lib/msan/msan_linux.cc @@ -16,6 +16,7 @@ #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD #include 
"msan.h" +#include "msan_report.h" #include "msan_thread.h" #include <elf.h> @@ -142,7 +143,7 @@ bool InitShadow(bool init_origins) { if (map) { if (!CheckMemoryRangeAvailability(start, size)) return false; - if ((uptr)MmapFixedNoReserve(start, size, kMemoryLayout[i].name) != start) + if (!MmapFixedNoReserve(start, size, kMemoryLayout[i].name)) return false; if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(start, size); diff --git a/lib/msan/msan_new_delete.cc b/lib/msan/msan_new_delete.cc index 5cc76e4bc08c..a0959aec5eb0 100644 --- a/lib/msan/msan_new_delete.cc +++ b/lib/msan/msan_new_delete.cc @@ -15,6 +15,7 @@ #include "msan.h" #include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #if MSAN_REPLACE_OPERATORS_NEW_AND_DELETE @@ -33,12 +34,12 @@ namespace std { #define OPERATOR_NEW_BODY(nothrow) \ GET_MALLOC_STACK_TRACE; \ void *res = msan_malloc(size, &stack);\ - if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ return res #define OPERATOR_NEW_BODY_ALIGN(nothrow) \ GET_MALLOC_STACK_TRACE;\ void *res = msan_memalign((uptr)align, size, &stack);\ - if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ return res; INTERCEPTOR_ATTRIBUTE diff --git a/lib/msan/msan_poisoning.cc b/lib/msan/msan_poisoning.cc index 92134f6a15b8..7420d946928b 100644 --- a/lib/msan/msan_poisoning.cc +++ b/lib/msan/msan_poisoning.cc @@ -139,7 +139,8 @@ void SetShadow(const void *ptr, uptr size, u8 value) { if (page_end != shadow_end) { REAL(memset)((void *)page_end, 0, shadow_end - page_end); } - MmapFixedNoReserve(page_beg, page_end - page_beg); + if (!MmapFixedNoReserve(page_beg, page_end - page_beg)) + Die(); } } } diff --git a/lib/msan/msan_report.cc b/lib/msan/msan_report.cc index 28c9bbabb3e9..2f0cc8d370ee 100644 --- a/lib/msan/msan_report.cc 
+++ b/lib/msan/msan_report.cc @@ -15,6 +15,7 @@ #include "msan.h" #include "msan_chained_origin_depot.h" #include "msan_origin.h" +#include "msan_report.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" @@ -30,8 +31,8 @@ namespace __msan { class Decorator: public __sanitizer::SanitizerCommonDecorator { public: Decorator() : SanitizerCommonDecorator() { } - const char *Origin() { return Magenta(); } - const char *Name() { return Green(); } + const char *Origin() const { return Magenta(); } + const char *Name() const { return Green(); } }; static void DescribeStackOrigin(const char *so, uptr pc) { diff --git a/lib/msan/msan_report.h b/lib/msan/msan_report.h new file mode 100644 index 000000000000..73840e417505 --- /dev/null +++ b/lib/msan/msan_report.h @@ -0,0 +1,34 @@ +//===-- msan_report.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of MemorySanitizer. MSan-private header for error +/// reporting functions. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef MSAN_REPORT_H +#define MSAN_REPORT_H + +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __msan { + +void ReportUMR(StackTrace *stack, u32 origin); +void ReportExpectedUMRNotFound(StackTrace *stack); +void ReportStats(); +void ReportAtExitStatistics(); +void DescribeMemoryRange(const void *x, uptr size); +void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size, + uptr offset); + +} // namespace __msan + +#endif // MSAN_REPORT_H diff --git a/lib/msan/tests/CMakeLists.txt b/lib/msan/tests/CMakeLists.txt index b460231783b8..e9f4e34bfe61 100644 --- a/lib/msan/tests/CMakeLists.txt +++ b/lib/msan/tests/CMakeLists.txt @@ -37,6 +37,9 @@ set(MSAN_UNITTEST_COMMON_CFLAGS -Werror=sign-compare -Wno-gnu-zero-variadic-macro-arguments ) +# Remove -stdlib= which is unused when passing -nostdinc++. +string(REGEX REPLACE "-stdlib=[a-zA-Z+]*" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) + set(MSAN_UNITTEST_INSTRUMENTED_CFLAGS ${MSAN_UNITTEST_COMMON_CFLAGS} -fsanitize=memory @@ -106,7 +109,7 @@ macro(add_msan_tests_for_arch arch kind cflags) DEPS ${MSAN_INST_LOADABLE_OBJECTS}) set(MSAN_TEST_OBJECTS ${MSAN_INST_TEST_OBJECTS} ${MSAN_INST_GTEST}) - set(MSAN_TEST_DEPS ${MSAN_TEST_OBJECTS} libcxx_msan_${arch} + set(MSAN_TEST_DEPS ${MSAN_TEST_OBJECTS} libcxx_msan_${arch}-build ${MSAN_LOADABLE_SO}) if(NOT COMPILER_RT_STANDALONE_BUILD) list(APPEND MSAN_TEST_DEPS msan) @@ -128,7 +131,8 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS AND COMPILER_RT_LIBCXX_PATH) set(LIBCXX_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/../libcxx_msan_${arch}) add_custom_libcxx(libcxx_msan_${arch} ${LIBCXX_PREFIX} DEPS ${MSAN_RUNTIME_LIBRARIES} - CFLAGS ${MSAN_LIBCXX_CFLAGS} ${TARGET_CFLAGS}) + CFLAGS ${MSAN_LIBCXX_CFLAGS} ${TARGET_CFLAGS} + USE_TOOLCHAIN) set(MSAN_LIBCXX_SO ${LIBCXX_PREFIX}/lib/libc++.so) add_msan_tests_for_arch(${arch} "" "") diff 
--git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc index 074a2f609eda..29260f16e704 100644 --- a/lib/msan/tests/msan_test.cc +++ b/lib/msan/tests/msan_test.cc @@ -65,16 +65,15 @@ int shmdt(const void *); #include <sys/ipc.h> #include <sys/shm.h> -#if !defined(__FreeBSD__) -# include <malloc.h> -# include <sys/sysinfo.h> -# include <sys/vfs.h> -# include <mntent.h> -# include <netinet/ether.h> -# if defined(__linux__) -# include <sys/uio.h> -# endif -#else +#if defined(__NetBSD__) +# include <signal.h> +# include <netinet/in.h> +# include <sys/uio.h> +# include <sys/mount.h> +# include <sys/sysctl.h> +# include <net/if.h> +# include <net/if_ether.h> +#elif defined(__FreeBSD__) # include <signal.h> # include <netinet/in.h> # include <pthread_np.h> @@ -90,6 +89,15 @@ extern "C" { // ordinary function, we can declare it here to complete the tests. void *mempcpy(void *dest, const void *src, size_t n); } +#else +# include <malloc.h> +# include <sys/sysinfo.h> +# include <sys/vfs.h> +# include <mntent.h> +# include <netinet/ether.h> +# if defined(__linux__) +# include <sys/uio.h> +# endif #endif #if defined(__i386__) || defined(__x86_64__) @@ -103,8 +111,7 @@ void *mempcpy(void *dest, const void *src, size_t n); # include <immintrin.h> #endif -// On FreeBSD procfs is not enabled by default. 
-#if defined(__FreeBSD__) +#if defined(__FreeBSD__) || defined(__NetBSD__) # define FILE_TO_READ "/bin/cat" # define DIR_TO_READ "/bin" # define SUBFILE_TO_READ "cat" @@ -717,6 +724,13 @@ TEST(MemorySanitizer, readlink) { delete [] x; } +TEST(MemorySanitizer, readlinkat) { + char *x = new char[1000]; + readlinkat(AT_FDCWD, SYMLINK_TO_READ, x, 1000); + EXPECT_NOT_POISONED(x[0]); + delete[] x; +} + TEST(MemorySanitizer, stat) { struct stat* st = new struct stat; int res = stat(FILE_TO_READ, st); @@ -738,6 +752,7 @@ TEST(MemorySanitizer, fstatat) { close(dirfd); } +#if !defined(__NetBSD__) TEST(MemorySanitizer, statfs) { struct statfs st; int res = statfs("/", &st); @@ -746,6 +761,7 @@ TEST(MemorySanitizer, statfs) { EXPECT_NOT_POISONED(st.f_bfree); EXPECT_NOT_POISONED(st.f_namelen); } +#endif TEST(MemorySanitizer, statvfs) { struct statvfs st; @@ -822,8 +838,7 @@ TEST(MemorySanitizer, poll) { close(pipefd[1]); } -// There is no ppoll() on FreeBSD. -#if !defined (__FreeBSD__) +#if !defined (__FreeBSD__) && !defined (__NetBSD__) TEST(MemorySanitizer, ppoll) { int* pipefd = new int[2]; int res = pipe(pipefd); @@ -1161,6 +1176,7 @@ TEST(MemorySanitizer, gethostbyaddr) { EXPECT_HOSTENT_NOT_POISONED(he); } +#if !defined(__NetBSD__) TEST(MemorySanitizer, gethostent_r) { char buf[2000]; struct hostent he; @@ -1173,7 +1189,9 @@ TEST(MemorySanitizer, gethostent_r) { EXPECT_HOSTENT_NOT_POISONED(result); EXPECT_NOT_POISONED(err); } +#endif +#if !defined(__NetBSD__) TEST(MemorySanitizer, gethostbyname_r) { char buf[2000]; struct hostent he; @@ -1186,7 +1204,9 @@ TEST(MemorySanitizer, gethostbyname_r) { EXPECT_HOSTENT_NOT_POISONED(result); EXPECT_NOT_POISONED(err); } +#endif +#if !defined(__NetBSD__) TEST(MemorySanitizer, gethostbyname_r_bad_host_name) { char buf[2000]; struct hostent he; @@ -1196,7 +1216,9 @@ TEST(MemorySanitizer, gethostbyname_r_bad_host_name) { ASSERT_EQ((struct hostent *)0, result); EXPECT_NOT_POISONED(err); } +#endif +#if !defined(__NetBSD__) 
TEST(MemorySanitizer, gethostbyname_r_erange) { char buf[5]; struct hostent he; @@ -1206,7 +1228,9 @@ TEST(MemorySanitizer, gethostbyname_r_erange) { ASSERT_EQ(ERANGE, errno); EXPECT_NOT_POISONED(err); } +#endif +#if !defined(__NetBSD__) TEST(MemorySanitizer, gethostbyname2_r) { char buf[2000]; struct hostent he; @@ -1220,7 +1244,9 @@ TEST(MemorySanitizer, gethostbyname2_r) { EXPECT_HOSTENT_NOT_POISONED(result); EXPECT_NOT_POISONED(err); } +#endif +#if !defined(__NetBSD__) TEST(MemorySanitizer, gethostbyaddr_r) { char buf[2000]; struct hostent he; @@ -1236,6 +1262,7 @@ TEST(MemorySanitizer, gethostbyaddr_r) { EXPECT_HOSTENT_NOT_POISONED(result); EXPECT_NOT_POISONED(err); } +#endif TEST(MemorySanitizer, getsockopt) { int sock = socket(AF_UNIX, SOCK_STREAM, 0); @@ -1262,8 +1289,7 @@ TEST(MemorySanitizer, getcwd_gnu) { free(res); } -// There's no get_current_dir_name() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, get_current_dir_name) { char* res = get_current_dir_name(); ASSERT_TRUE(res != NULL); @@ -1281,8 +1307,7 @@ TEST(MemorySanitizer, shmctl) { ASSERT_GT(res, -1); EXPECT_NOT_POISONED(ds); - // FreeBSD does not support shmctl(IPC_INFO) and shmctl(SHM_INFO). -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) struct shminfo si; res = shmctl(id, IPC_INFO, (struct shmid_ds *)&si); ASSERT_GT(res, -1); @@ -1330,8 +1355,7 @@ TEST(MemorySanitizer, shmat) { ASSERT_GT(res, -1); } -// There's no random_r() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, random_r) { int32_t x; char z[64]; @@ -1411,8 +1435,7 @@ TEST(MemorySanitizer, realpath_null) { free(res); } -// There's no canonicalize_file_name() on FreeBSD. 
-#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, canonicalize_file_name) { const char* relpath = "."; char* res = canonicalize_file_name(relpath); @@ -1870,8 +1893,7 @@ TEST(MemorySanitizer, modfl) { EXPECT_NOT_POISONED(y); } -// There's no sincos() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, sincos) { double s, c; sincos(0.2, &s, &c); @@ -1880,8 +1902,7 @@ TEST(MemorySanitizer, sincos) { } #endif -// There's no sincosf() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, sincosf) { float s, c; sincosf(0.2, &s, &c); @@ -1890,8 +1911,7 @@ TEST(MemorySanitizer, sincosf) { } #endif -// There's no sincosl() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, sincosl) { long double s, c; sincosl(0.2, &s, &c); @@ -1953,8 +1973,7 @@ TEST(MemorySanitizer, lgammaf_r) { EXPECT_NOT_POISONED(sgn); } -// There's no lgammal_r() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, lgammal_r) { int sgn; long double res = lgammal_r(1.1, &sgn); @@ -1963,8 +1982,7 @@ TEST(MemorySanitizer, lgammal_r) { } #endif -// There's no drand48_r() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, drand48_r) { struct drand48_data buf; srand48_r(0, &buf); @@ -1974,8 +1992,7 @@ TEST(MemorySanitizer, drand48_r) { } #endif -// There's no lrand48_r() on FreeBSD. 
-#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, lrand48_r) { struct drand48_data buf; srand48_r(0, &buf); @@ -2260,7 +2277,7 @@ TEST(MemorySanitizer, localtime_r) { EXPECT_NE(0U, strlen(time.tm_zone)); } -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) /* Creates a temporary file with contents similar to /etc/fstab to be used with getmntent{_r}. */ class TempFstabFile { @@ -2298,8 +2315,7 @@ class TempFstabFile { }; #endif -// There's no getmntent() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, getmntent) { TempFstabFile fstabtmp; ASSERT_TRUE(fstabtmp.Create()); @@ -2317,8 +2333,7 @@ TEST(MemorySanitizer, getmntent) { } #endif -// There's no getmntent_r() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, getmntent_r) { TempFstabFile fstabtmp; ASSERT_TRUE(fstabtmp.Create()); @@ -2338,6 +2353,7 @@ TEST(MemorySanitizer, getmntent_r) { } #endif +#if !defined(__NetBSD__) TEST(MemorySanitizer, ether) { const char *asc = "11:22:33:44:55:66"; struct ether_addr *paddr = ether_aton(asc); @@ -2356,6 +2372,7 @@ TEST(MemorySanitizer, ether) { ASSERT_EQ(s, buf); ASSERT_NE(0U, strlen(buf)); } +#endif TEST(MemorySanitizer, mmap) { const int size = 4096; @@ -2376,8 +2393,7 @@ TEST(MemorySanitizer, mmap) { } } -// There's no fcvt() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) // FIXME: enable and add ecvt. // FIXME: check why msandr does nt handle fcvt. TEST(MemorySanitizer, fcvt) { @@ -2395,8 +2411,7 @@ TEST(MemorySanitizer, fcvt) { } #endif -// There's no fcvt_long() on FreeBSD. 
-#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, fcvt_long) { int a, b; break_optimization(&a); @@ -2471,11 +2486,15 @@ void SigactionHandler(int signo, siginfo_t* si, void* uc) { ASSERT_TRUE(si != NULL); EXPECT_NOT_POISONED(si->si_errno); EXPECT_NOT_POISONED(si->si_pid); -#if __linux__ -# if defined(__x86_64__) +#ifdef _UC_MACHINE_PC + EXPECT_NOT_POISONED(_UC_MACHINE_PC((ucontext_t*)uc)); +#else +# if __linux__ +# if defined(__x86_64__) EXPECT_NOT_POISONED(((ucontext_t*)uc)->uc_mcontext.gregs[REG_RIP]); -# elif defined(__i386__) +# elif defined(__i386__) EXPECT_NOT_POISONED(((ucontext_t*)uc)->uc_mcontext.gregs[REG_EIP]); +# endif # endif #endif ++cnt; @@ -3005,7 +3024,9 @@ TEST(MemorySanitizer, LongStruct) { EXPECT_POISONED(s2.a8); } -#ifdef __GLIBC__ +#if defined(__FreeBSD__) || defined(__NetBSD__) +#define MSAN_TEST_PRLIMIT 0 +#elif defined(__GLIBC__) #define MSAN_TEST_PRLIMIT __GLIBC_PREREQ(2, 13) #else #define MSAN_TEST_PRLIMIT 1 @@ -3056,9 +3077,13 @@ TEST(MemorySanitizer, getrusage) { EXPECT_NOT_POISONED(usage.ru_nivcsw); } -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) || defined(__NetBSD__) static void GetProgramPath(char *buf, size_t sz) { +#if defined(__FreeBSD__) int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 }; +#elif defined(__NetBSD__) + int mib[4] = { CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME}; +#endif int res = sysctl(mib, 4, buf, &sz, NULL, 0); ASSERT_EQ(0, res); } @@ -3180,8 +3205,7 @@ TEST(MemorySanitizer, dlopenFailed) { #endif // MSAN_TEST_DISABLE_DLOPEN -// There's no sched_getaffinity() on FreeBSD. 
-#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, sched_getaffinity) { cpu_set_t mask; int res = sched_getaffinity(getpid(), sizeof(mask), &mask); @@ -3329,12 +3353,14 @@ TEST(MemorySanitizer, pthread_attr_get) { EXPECT_NOT_POISONED(v); EXPECT_NOT_POISONED(w); } +#if !defined(__NetBSD__) { cpu_set_t v; res = pthread_attr_getaffinity_np(&attr, sizeof(v), &v); ASSERT_EQ(0, res); EXPECT_NOT_POISONED(v); } +#endif res = pthread_attr_destroy(&attr); ASSERT_EQ(0, res); } @@ -3426,8 +3452,7 @@ TEST(MemorySanitizer, posix_memalign) { free(p); } -// There's no memalign() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, memalign) { void *p = memalign(4096, 13); EXPECT_EQ(0U, (uintptr_t)p % 4096); @@ -3442,8 +3467,7 @@ TEST(MemorySanitizer, valloc) { free(a); } -// There's no pvalloc() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, pvalloc) { uintptr_t PageSize = GetPageSize(); void *p = pvalloc(PageSize + 100); @@ -3500,8 +3524,7 @@ TEST(MemorySanitizer, gethostname) { EXPECT_NOT_POISONED(strlen(buf)); } -// There's no sysinfo() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, sysinfo) { struct sysinfo info; int res = sysinfo(&info); @@ -3598,8 +3621,7 @@ TEST(MemorySanitizer, getpwent_r) { EXPECT_NOT_POISONED(pwdres); } -// There's no fgetpwent() on FreeBSD. -#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, fgetpwent) { FILE *fp = fopen("/etc/passwd", "r"); struct passwd *p = fgetpwent(fp); @@ -3622,8 +3644,7 @@ TEST(MemorySanitizer, getgrent) { EXPECT_NOT_POISONED(p->gr_gid); } -// There's no fgetgrent() on FreeBSD. 
-#if !defined(__FreeBSD__) +#if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, fgetgrent) { FILE *fp = fopen("/etc/group", "r"); struct group *grp = fgetgrent(fp); @@ -3654,7 +3675,6 @@ TEST(MemorySanitizer, getgrent_r) { EXPECT_NOT_POISONED(grpres); } -// There's no fgetgrent_r() on FreeBSD and NetBSD. #if !defined(__FreeBSD__) && !defined(__NetBSD__) TEST(MemorySanitizer, fgetgrent_r) { FILE *fp = fopen("/etc/group", "r"); @@ -4037,7 +4057,6 @@ typedef U4 V2x32 __attribute__((__vector_size__(8))); typedef U2 V4x16 __attribute__((__vector_size__(8))); typedef U1 V8x8 __attribute__((__vector_size__(8))); - V8x16 shift_sse2_left_scalar(V8x16 x, U4 y) { return _mm_slli_epi16(x, y); } @@ -4385,12 +4404,14 @@ void MemCpyTest() { EXPECT_POISONED_O(y[N/2], ox); EXPECT_POISONED_O(y[N-1], ox); EXPECT_NOT_POISONED(x); +#if !defined(__NetBSD__) void *res = mempcpy(q, x, N * sizeof(T)); ASSERT_EQ(q + N, res); EXPECT_POISONED_O(q[0], ox); EXPECT_POISONED_O(q[N/2], ox); EXPECT_POISONED_O(q[N-1], ox); EXPECT_NOT_POISONED(x); +#endif memmove(z, x, N * sizeof(T)); EXPECT_POISONED_O(z[0], ox); EXPECT_POISONED_O(z[N/2], ox); diff --git a/lib/profile/CMakeLists.txt b/lib/profile/CMakeLists.txt index 91d67ec365c5..488673dd2c22 100644 --- a/lib/profile/CMakeLists.txt +++ b/lib/profile/CMakeLists.txt @@ -59,16 +59,25 @@ set(PROFILE_SOURCES InstrProfilingNameVar.c InstrProfilingWriter.c InstrProfilingPlatformDarwin.c + InstrProfilingPlatformFuchsia.c InstrProfilingPlatformLinux.c InstrProfilingPlatformOther.c InstrProfilingRuntime.cc InstrProfilingUtil.c) +set(PROFILE_HEADERS + InstrProfData.inc + InstrProfiling.h + InstrProfilingInternal.h + InstrProfilingPort.h + InstrProfilingUtil.h + WindowsMMap.h) + if(WIN32) list(APPEND PROFILE_SOURCES WindowsMMap.c) endif() -if(UNIX) +if(FUCHSIA OR UNIX) set(EXTRA_FLAGS -fPIC -Wno-pedantic) @@ -104,6 +113,7 @@ if(APPLE) ARCHS ${PROFILE_SUPPORTED_ARCH} CFLAGS ${EXTRA_FLAGS} SOURCES ${PROFILE_SOURCES} + ADDITIONAL_HEADERS 
${PROFILE_HEADERS} PARENT_TARGET profile) else() add_compiler_rt_runtime(clang_rt.profile @@ -111,5 +121,6 @@ else() ARCHS ${PROFILE_SUPPORTED_ARCH} CFLAGS ${EXTRA_FLAGS} SOURCES ${PROFILE_SOURCES} + ADDITIONAL_HEADERS ${PROFILE_HEADERS} PARENT_TARGET profile) endif() diff --git a/lib/profile/GCDAProfiling.c b/lib/profile/GCDAProfiling.c index ef299f002e20..cbca365510b5 100644 --- a/lib/profile/GCDAProfiling.c +++ b/lib/profile/GCDAProfiling.c @@ -20,6 +20,8 @@ |* \*===----------------------------------------------------------------------===*/ +#if !defined(__Fuchsia__) + #include <errno.h> #include <fcntl.h> #include <stdio.h> @@ -79,38 +81,83 @@ static FILE *output_file = NULL; * Buffer that we write things into. */ #define WRITE_BUFFER_SIZE (128 * 1024) -static char *write_buffer = NULL; +static unsigned char *write_buffer = NULL; static uint64_t cur_buffer_size = 0; static uint64_t cur_pos = 0; static uint64_t file_size = 0; static int new_file = 0; static int fd = -1; -/* - * A list of functions to write out the data. - */ -typedef void (*writeout_fn)(); +typedef void (*fn_ptr)(); + +typedef void* dynamic_object_id; +// The address of this variable identifies a given dynamic object. +static dynamic_object_id current_id; +#define CURRENT_ID (&current_id) -struct writeout_fn_node { - writeout_fn fn; - struct writeout_fn_node *next; +struct fn_node { + dynamic_object_id id; + fn_ptr fn; + struct fn_node* next; }; -static struct writeout_fn_node *writeout_fn_head = NULL; -static struct writeout_fn_node *writeout_fn_tail = NULL; +struct fn_list { + struct fn_node *head, *tail; +}; /* - * A list of flush functions that our __gcov_flush() function should call. + * A list of functions to write out the data, shared between all dynamic objects.
*/ -typedef void (*flush_fn)(); +struct fn_list writeout_fn_list; -struct flush_fn_node { - flush_fn fn; - struct flush_fn_node *next; -}; +/* + * A list of flush functions that our __gcov_flush() function should call, shared between all dynamic objects. + */ +struct fn_list flush_fn_list; + +static void fn_list_insert(struct fn_list* list, fn_ptr fn) { + struct fn_node* new_node = malloc(sizeof(struct fn_node)); + new_node->fn = fn; + new_node->next = NULL; + new_node->id = CURRENT_ID; -static struct flush_fn_node *flush_fn_head = NULL; -static struct flush_fn_node *flush_fn_tail = NULL; + if (!list->head) { + list->head = list->tail = new_node; + } else { + list->tail->next = new_node; + list->tail = new_node; + } +} + +static void fn_list_remove(struct fn_list* list) { + struct fn_node* curr = list->head; + struct fn_node* prev = NULL; + struct fn_node* next = NULL; + + while (curr) { + next = curr->next; + + if (curr->id == CURRENT_ID) { + if (curr == list->head) { + list->head = next; + } + + if (curr == list->tail) { + list->tail = prev; + } + + if (prev) { + prev->next = next; + } + + free(curr); + } else { + prev = curr; + } + + curr = next; + } +} static void resize_write_buffer(uint64_t size) { if (!new_file) return; @@ -133,7 +180,12 @@ static void write_32bit_value(uint32_t i) { } static void write_64bit_value(uint64_t i) { - write_bytes((char*)&i, 8); + // GCOV uses a lo-/hi-word format even on big-endian systems. + // See also GCOVBuffer::readInt64 in LLVM. 
+ uint32_t lo = (uint32_t) i; + uint32_t hi = (uint32_t) (i >> 32); + write_32bit_value(lo); + write_32bit_value(hi); } static uint32_t length_of_string(const char *s) { @@ -158,17 +210,26 @@ static uint32_t read_32bit_value() { return val; } -static uint64_t read_64bit_value() { - uint64_t val; +static uint32_t read_le_32bit_value() { + uint32_t val = 0; + int i; if (new_file) - return (uint64_t)-1; + return (uint32_t)-1; - val = *(uint64_t*)&write_buffer[cur_pos]; - cur_pos += 8; + for (i = 0; i < 4; i++) + val |= write_buffer[cur_pos++] << (8*i); return val; } +static uint64_t read_64bit_value() { + // GCOV uses a lo-/hi-word format even on big-endian systems. + // See also GCOVBuffer::readInt64 in LLVM. + uint32_t lo = read_32bit_value(); + uint32_t hi = read_32bit_value(); + return ((uint64_t)hi << 32) | ((uint64_t)lo); +} + static char *mangle_filename(const char *orig_filename) { char *new_filename; size_t prefix_len; @@ -228,6 +289,7 @@ static void unmap_file() { * profiling enabled will emit to a different file. Only one file may be * started at a time. */ +COMPILER_RT_VISIBILITY void llvm_gcda_start_file(const char *orig_filename, const char version[4], uint32_t checksum) { const char *mode = "r+b"; @@ -295,6 +357,7 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4], /* Given an array of pointers to counters (counters), increment the n-th one, * where we're also given a pointer to n (predecessor). 
*/ +COMPILER_RT_VISIBILITY void llvm_gcda_increment_indirect_counter(uint32_t *predecessor, uint64_t **counters) { uint64_t *counter; @@ -317,6 +380,7 @@ void llvm_gcda_increment_indirect_counter(uint32_t *predecessor, #endif } +COMPILER_RT_VISIBILITY void llvm_gcda_emit_function(uint32_t ident, const char *function_name, uint32_t func_checksum, uint8_t use_extra_checksum, uint32_t cfg_checksum) { @@ -343,6 +407,7 @@ void llvm_gcda_emit_function(uint32_t ident, const char *function_name, write_string(function_name); } +COMPILER_RT_VISIBILITY void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { uint32_t i; uint64_t *old_ctrs = NULL; @@ -351,7 +416,7 @@ void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { if (!output_file) return; - val = read_32bit_value(); + val = read_le_32bit_value(); if (val != (uint32_t)-1) { /* There are counters present in the file. Merge them. */ @@ -394,16 +459,18 @@ void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) { #endif } +COMPILER_RT_VISIBILITY void llvm_gcda_summary_info() { const uint32_t obj_summary_len = 9; /* Length for gcov compatibility. */ uint32_t i; uint32_t runs = 1; + static uint32_t run_counted = 0; // We only want to increase the run count once. uint32_t val = 0; uint64_t save_cur_pos = cur_pos; if (!output_file) return; - val = read_32bit_value(); + val = read_le_32bit_value(); if (val != (uint32_t)-1) { /* There are counters present in the file. Merge them. */ @@ -424,7 +491,9 @@ void llvm_gcda_summary_info() { read_32bit_value(); /* checksum, unused */ read_32bit_value(); /* num, unused */ - runs += read_32bit_value(); /* Add previous run count to new counter. */ + uint32_t prev_runs = read_32bit_value(); + /* Add previous run count to new counter, if not already counted before. */ + runs = run_counted ? 
prev_runs : prev_runs + 1; } cur_pos = save_cur_pos; @@ -442,11 +511,14 @@ void llvm_gcda_summary_info() { write_bytes("\0\0\0\xa3", 4); /* tag indicates 1 program */ write_32bit_value(0); /* 0 length */ + run_counted = 1; + #ifdef DEBUG_GCDAPROFILING fprintf(stderr, "llvmgcda: %u runs\n", runs); #endif } +COMPILER_RT_VISIBILITY void llvm_gcda_end_file() { /* Write out EOF record. */ if (output_file) { @@ -459,6 +531,7 @@ void llvm_gcda_end_file() { unmap_file(); } + fflush(output_file); lprofUnlockFd(fd); fclose(output_file); output_file = NULL; @@ -471,53 +544,35 @@ void llvm_gcda_end_file() { #endif } -void llvm_register_writeout_function(writeout_fn fn) { - struct writeout_fn_node *new_node = malloc(sizeof(struct writeout_fn_node)); - new_node->fn = fn; - new_node->next = NULL; - - if (!writeout_fn_head) { - writeout_fn_head = writeout_fn_tail = new_node; - } else { - writeout_fn_tail->next = new_node; - writeout_fn_tail = new_node; - } +COMPILER_RT_VISIBILITY +void llvm_register_writeout_function(fn_ptr fn) { + fn_list_insert(&writeout_fn_list, fn); } +COMPILER_RT_VISIBILITY void llvm_writeout_files(void) { - struct writeout_fn_node *curr = writeout_fn_head; + struct fn_node *curr = writeout_fn_list.head; while (curr) { - curr->fn(); + if (curr->id == CURRENT_ID) { + curr->fn(); + } curr = curr->next; } } +COMPILER_RT_VISIBILITY void llvm_delete_writeout_function_list(void) { - while (writeout_fn_head) { - struct writeout_fn_node *node = writeout_fn_head; - writeout_fn_head = writeout_fn_head->next; - free(node); - } - - writeout_fn_head = writeout_fn_tail = NULL; + fn_list_remove(&writeout_fn_list); } -void llvm_register_flush_function(flush_fn fn) { - struct flush_fn_node *new_node = malloc(sizeof(struct flush_fn_node)); - new_node->fn = fn; - new_node->next = NULL; - - if (!flush_fn_head) { - flush_fn_head = flush_fn_tail = new_node; - } else { - flush_fn_tail->next = new_node; - flush_fn_tail = new_node; - } +COMPILER_RT_VISIBILITY +void 
llvm_register_flush_function(fn_ptr fn) { + fn_list_insert(&flush_fn_list, fn); } void __gcov_flush() { - struct flush_fn_node *curr = flush_fn_head; + struct fn_node* curr = flush_fn_list.head; while (curr) { curr->fn(); @@ -525,17 +580,13 @@ void __gcov_flush() { } } +COMPILER_RT_VISIBILITY void llvm_delete_flush_function_list(void) { - while (flush_fn_head) { - struct flush_fn_node *node = flush_fn_head; - flush_fn_head = flush_fn_head->next; - free(node); - } - - flush_fn_head = flush_fn_tail = NULL; + fn_list_remove(&flush_fn_list); } -void llvm_gcov_init(writeout_fn wfn, flush_fn ffn) { +COMPILER_RT_VISIBILITY +void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn) { static int atexit_ran = 0; if (wfn) @@ -553,3 +604,5 @@ void llvm_gcov_init(writeout_fn wfn, flush_fn ffn) { atexit(llvm_writeout_files); } } + +#endif diff --git a/lib/profile/InstrProfData.inc b/lib/profile/InstrProfData.inc index 6a98dc7b9b85..eb4a792ce82d 100644 --- a/lib/profile/InstrProfData.inc +++ b/lib/profile/InstrProfData.inc @@ -178,7 +178,7 @@ VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx)) * functions are profiled by the instrumented code. The target addresses are * written in the raw profile data and converted to target function name's MD5 * hash by the profile reader during deserialization. Typically, this happens - * when the the raw profile data is read during profile merging. + * when the raw profile data is read during profile merging. * * For this remapping the ProfData is used. ProfData contains both the function * name hash and the function address. diff --git a/lib/profile/InstrProfiling.h b/lib/profile/InstrProfiling.h index 945f1c4ac38d..3ef7eacf06be 100644 --- a/lib/profile/InstrProfiling.h +++ b/lib/profile/InstrProfiling.h @@ -106,6 +106,10 @@ void INSTR_PROF_VALUE_PROF_FUNC( #include "InstrProfData.inc" ); +void __llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data, + uint32_t CounterIndex, + uint64_t CounterValue); + /*! 
* \brief Write instrumentation data to the current file. * @@ -166,6 +170,14 @@ void __llvm_profile_initialize_file(void); */ const char *__llvm_profile_get_path_prefix(); +/*! + * \brief Return filename (including path) of the profile data. Note that if the + * user calls __llvm_profile_set_filename later after invoking this interface, + * the actual file name may differ from what is returned here. + * Side-effect: this API call will invoke malloc with dynamic memory allocation. + */ +const char *__llvm_profile_get_filename(); + /*! \brief Get the magic token for the file format. */ uint64_t __llvm_profile_get_magic(void); diff --git a/lib/profile/InstrProfilingFile.c b/lib/profile/InstrProfilingFile.c index d7c0abbc16e3..c4cf3ccd70c2 100644 --- a/lib/profile/InstrProfilingFile.c +++ b/lib/profile/InstrProfilingFile.c @@ -7,6 +7,8 @@ |* \*===----------------------------------------------------------------------===*/ +#if !defined(__Fuchsia__) + #include <errno.h> #include <stdio.h> #include <stdlib.h> @@ -19,6 +21,7 @@ #include "WindowsMMap.h" /* For _chsize_s */ #include <io.h> +#include <process.h> #else #include <sys/file.h> #include <sys/mman.h> @@ -68,6 +71,7 @@ typedef struct lprofFilename { * by runtime. */ unsigned OwnsFilenamePat; const char *ProfilePathPrefix; + const char *Filename; char PidChars[MAX_PID_SIZE]; char Hostname[COMPILER_RT_MAX_HOSTLEN]; unsigned NumPids; @@ -83,11 +87,11 @@ typedef struct lprofFilename { ProfileNameSpecifier PNS; } lprofFilename; -COMPILER_RT_WEAK lprofFilename lprofCurFilename = {0, 0, 0, {0}, {0}, - 0, 0, 0, PNS_unknown}; +COMPILER_RT_WEAK lprofFilename lprofCurFilename = {0, 0, 0, 0, {0}, + {0}, 0, 0, 0, PNS_unknown}; static int getCurFilenameLength(); -static const char *getCurFilename(char *FilenameBuf); +static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf); static unsigned doMerging() { return lprofCurFilename.MergePoolSize; } /* Return 1 if there is an error, otherwise return 0. 
*/ @@ -183,8 +187,12 @@ static int doProfileMerging(FILE *ProfileFile, int *MergeDone) { /* Now start merging */ __llvm_profile_merge_from_buffer(ProfileBuffer, ProfileFileSize); - (void)munmap(ProfileBuffer, ProfileFileSize); + // Truncate the file in case merging of value profile did not happend to + // prevent from leaving garbage data at the end of the profile file. + COMPILER_RT_FTRUNCATE(ProfileFile, __llvm_profile_get_size_for_buffer()); + + (void)munmap(ProfileBuffer, ProfileFileSize); *MergeDone = 1; return 0; @@ -234,6 +242,7 @@ static int writeFile(const char *OutputName) { FILE *OutputFile; int MergeDone = 0; + VPMergeHook = &lprofMergeValueProfData; if (!doMerging()) OutputFile = fopen(OutputName, "ab"); else @@ -260,7 +269,7 @@ static void truncateCurrentFile(void) { Length = getCurFilenameLength(); FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1); - Filename = getCurFilename(FilenameBuf); + Filename = getCurFilename(FilenameBuf, 0); if (!Filename) return; @@ -305,15 +314,18 @@ static int parseFilenamePattern(const char *FilenamePat, char *Hostname = &lprofCurFilename.Hostname[0]; int MergingEnabled = 0; - /* Clean up cached prefix. */ + /* Clean up cached prefix and filename. */ if (lprofCurFilename.ProfilePathPrefix) free((void *)lprofCurFilename.ProfilePathPrefix); - memset(&lprofCurFilename, 0, sizeof(lprofCurFilename)); + if (lprofCurFilename.Filename) + free((void *)lprofCurFilename.Filename); if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) { free((void *)lprofCurFilename.FilenamePat); } + memset(&lprofCurFilename, 0, sizeof(lprofCurFilename)); + if (!CopyFilenamePat) lprofCurFilename.FilenamePat = FilenamePat; else { @@ -422,17 +434,25 @@ static int getCurFilenameLength() { /* Return the pointer to the current profile file name (after substituting * PIDs and Hostnames in filename pattern. \p FilenameBuf is the buffer * to store the resulting filename. 
If no substitution is needed, the - * current filename pattern string is directly returned. */ -static const char *getCurFilename(char *FilenameBuf) { - int I, J, PidLength, HostNameLength; + * current filename pattern string is directly returned, unless ForceUseBuf + * is enabled. */ +static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf) { + int I, J, PidLength, HostNameLength, FilenamePatLength; const char *FilenamePat = lprofCurFilename.FilenamePat; if (!lprofCurFilename.FilenamePat || !lprofCurFilename.FilenamePat[0]) return 0; if (!(lprofCurFilename.NumPids || lprofCurFilename.NumHosts || - lprofCurFilename.MergePoolSize)) - return lprofCurFilename.FilenamePat; + lprofCurFilename.MergePoolSize)) { + if (!ForceUseBuf) + return lprofCurFilename.FilenamePat; + + FilenamePatLength = strlen(lprofCurFilename.FilenamePat); + memcpy(FilenameBuf, lprofCurFilename.FilenamePat, FilenamePatLength); + FilenameBuf[FilenamePatLength] = '\0'; + return FilenameBuf; + } PidLength = strlen(lprofCurFilename.PidChars); HostNameLength = strlen(lprofCurFilename.Hostname); @@ -486,7 +506,7 @@ const char *__llvm_profile_get_path_prefix(void) { Length = getCurFilenameLength(); FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1); - Filename = getCurFilename(FilenameBuf); + Filename = getCurFilename(FilenameBuf, 0); if (!Filename) return "\0"; @@ -506,6 +526,29 @@ const char *__llvm_profile_get_path_prefix(void) { return Prefix; } +COMPILER_RT_VISIBILITY +const char *__llvm_profile_get_filename(void) { + int Length; + char *FilenameBuf; + const char *Filename; + + if (lprofCurFilename.Filename) + return lprofCurFilename.Filename; + + Length = getCurFilenameLength(); + FilenameBuf = (char *)malloc(Length + 1); + if (!FilenameBuf) { + PROF_ERR("Failed to %s\n", "allocate memory."); + return "\0"; + } + Filename = getCurFilename(FilenameBuf, 1); + if (!Filename) + return "\0"; + + lprofCurFilename.Filename = FilenameBuf; + return FilenameBuf; +} + /* This method is invoked 
by the runtime initialization hook * InstrProfilingRuntime.o if it is linked in. Both user specified * profile path via -fprofile-instr-generate= and LLVM_PROFILE_FILE @@ -562,7 +605,7 @@ int __llvm_profile_write_file(void) { Length = getCurFilenameLength(); FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1); - Filename = getCurFilename(FilenameBuf); + Filename = getCurFilename(FilenameBuf, 0); /* Check the filename. */ if (!Filename) { @@ -620,3 +663,5 @@ int __llvm_profile_register_write_file_atexit(void) { HasBeenRegistered = 1; return atexit(writeFileWithoutReturn); } + +#endif diff --git a/lib/profile/InstrProfilingMerge.c b/lib/profile/InstrProfilingMerge.c index a2021154df73..2397250fb122 100644 --- a/lib/profile/InstrProfilingMerge.c +++ b/lib/profile/InstrProfilingMerge.c @@ -17,8 +17,9 @@ #define INSTR_PROF_VALUE_PROF_DATA #include "InstrProfData.inc" -COMPILER_RT_WEAK void (*VPMergeHook)(ValueProfData *, - __llvm_profile_data *) = NULL; +COMPILER_RT_VISIBILITY +void (*VPMergeHook)(ValueProfData *, __llvm_profile_data *); + COMPILER_RT_VISIBILITY uint64_t lprofGetLoadModuleSignature() { /* A very fast way to compute a module signature. */ diff --git a/lib/profile/InstrProfilingMergeFile.c b/lib/profile/InstrProfilingMergeFile.c index ac5ee9fbedc6..dc1bc9762676 100644 --- a/lib/profile/InstrProfilingMergeFile.c +++ b/lib/profile/InstrProfilingMergeFile.c @@ -10,6 +10,8 @@ |* stored in files. \*===----------------------------------------------------------------------===*/ +#if !defined(__Fuchsia__) + #include "InstrProfiling.h" #include "InstrProfilingInternal.h" #include "InstrProfilingUtil.h" @@ -17,25 +19,27 @@ #define INSTR_PROF_VALUE_PROF_DATA #include "InstrProfData.inc" -void (*VPMergeHook)(ValueProfData *, - __llvm_profile_data *) = &lprofMergeValueProfData; - /* Merge value profile data pointed to by SrcValueProfData into * in-memory profile counters pointed by to DstData. 
*/ void lprofMergeValueProfData(ValueProfData *SrcValueProfData, __llvm_profile_data *DstData) { - unsigned I, S, V, C; + unsigned I, S, V, DstIndex = 0; InstrProfValueData *VData; ValueProfRecord *VR = getFirstValueProfRecord(SrcValueProfData); for (I = 0; I < SrcValueProfData->NumValueKinds; I++) { VData = getValueProfRecordValueData(VR); + unsigned SrcIndex = 0; for (S = 0; S < VR->NumValueSites; S++) { uint8_t NV = VR->SiteCountArray[S]; for (V = 0; V < NV; V++) { - for (C = 0; C < VData[V].Count; C++) - __llvm_profile_instrument_target(VData[V].Value, DstData, S); + __llvm_profile_instrument_target_value(VData[SrcIndex].Value, DstData, + DstIndex, VData[SrcIndex].Count); + ++SrcIndex; } + ++DstIndex; } VR = getValueProfRecordNext(VR); } } + +#endif diff --git a/lib/profile/InstrProfilingPlatformFuchsia.c b/lib/profile/InstrProfilingPlatformFuchsia.c new file mode 100644 index 000000000000..a50602dede7d --- /dev/null +++ b/lib/profile/InstrProfilingPlatformFuchsia.c @@ -0,0 +1,183 @@ +/*===- InstrProfilingPlatformFuchsia.c - Profile data Fuchsia platform ----===*\ +|* +|* The LLVM Compiler Infrastructure +|* +|* This file is distributed under the University of Illinois Open Source +|* License. See LICENSE.TXT for details. +|* +\*===----------------------------------------------------------------------===*/ +/* + * This file implements the profiling runtime for Fuchsia and defines the + * shared profile runtime interface. Each module (executable or DSO) statically + * links in the whole profile runtime to satisfy the calls from its + * instrumented code. Several modules in the same program might be separately + * compiled and even use different versions of the instrumentation ABI and data + * format. All they share in common is the VMO and the offset, which live in + * exported globals so that exactly one definition will be shared across all + * modules. 
Each module has its own independent runtime that registers its own + * atexit hook to append its own data into the shared VMO which is published + * via the data sink hook provided by Fuchsia's dynamic linker. + */ + +#if defined(__Fuchsia__) + +#include <inttypes.h> +#include <stdarg.h> +#include <stdbool.h> +#include <stdlib.h> + +#include <zircon/process.h> +#include <zircon/sanitizer.h> +#include <zircon/syscalls.h> + +#include "InstrProfiling.h" +#include "InstrProfilingInternal.h" +#include "InstrProfilingUtil.h" + +/* VMO that contains the coverage data shared across all modules. This symbol + * has default visibility and is exported in each module (executable or DSO) + * that statically links in the profiling runtime. + */ +zx_handle_t __llvm_profile_vmo; +/* Current offset within the VMO where data should be written next. This symbol + * has default visibility and is exported in each module (executable or DSO) + * that statically links in the profiling runtime. + */ +uint64_t __llvm_profile_offset; + +static const char ProfileSinkName[] = "llvm-profile"; + +static inline void lprofWrite(const char *fmt, ...) { + char s[256]; + + va_list ap; + va_start(ap, fmt); + int ret = vsnprintf(s, sizeof(s), fmt, ap); + va_end(ap); + + __sanitizer_log_write(s, ret + 1); +} + +static uint32_t lprofVMOWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs, + uint32_t NumIOVecs) { + /* Allocate VMO if it hasn't been created yet. */ + if (__llvm_profile_vmo == ZX_HANDLE_INVALID) { + /* Get information about the current process. */ + zx_info_handle_basic_t Info; + zx_status_t Status = + _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info, + sizeof(Info), NULL, NULL); + if (Status != ZX_OK) + return -1; + + /* Create VMO to hold the profile data. */ + Status = _zx_vmo_create(0, 0, &__llvm_profile_vmo); + if (Status != ZX_OK) + return -1; + + /* Give the VMO a name including our process KOID so it's easy to spot. 
*/ + char VmoName[ZX_MAX_NAME_LEN]; + snprintf(VmoName, sizeof(VmoName), "%s.%" PRIu64, ProfileSinkName, + Info.koid); + _zx_object_set_property(__llvm_profile_vmo, ZX_PROP_NAME, VmoName, + strlen(VmoName)); + + /* Duplicate the handle since __sanitizer_publish_data consumes it. */ + zx_handle_t Handle; + Status = + _zx_handle_duplicate(__llvm_profile_vmo, ZX_RIGHT_SAME_RIGHTS, &Handle); + if (Status != ZX_OK) + return -1; + + /* Publish the VMO which contains profile data to the system. */ + __sanitizer_publish_data(ProfileSinkName, Handle); + + /* Use the dumpfile symbolizer markup element to write the name of VMO. */ + lprofWrite("LLVM Profile: {{{dumpfile:%s:%s}}}\n", + ProfileSinkName, VmoName); + } + + /* Compute the total length of data to be written. */ + size_t Length = 0; + for (uint32_t I = 0; I < NumIOVecs; I++) + Length += IOVecs[I].ElmSize * IOVecs[I].NumElm; + + /* Resize the VMO to ensure there's sufficient space for the data. */ + zx_status_t Status = + _zx_vmo_set_size(__llvm_profile_vmo, __llvm_profile_offset + Length); + if (Status != ZX_OK) + return -1; + + /* Copy the data into VMO. */ + for (uint32_t I = 0; I < NumIOVecs; I++) { + size_t Length = IOVecs[I].ElmSize * IOVecs[I].NumElm; + if (IOVecs[I].Data) { + Status = _zx_vmo_write(__llvm_profile_vmo, IOVecs[I].Data, + __llvm_profile_offset, Length); + if (Status != ZX_OK) + return -1; + } + __llvm_profile_offset += Length; + } + + return 0; +} + +static void initVMOWriter(ProfDataWriter *This) { + This->Write = lprofVMOWriter; + This->WriterCtx = NULL; +} + +static int dump(void) { + if (lprofProfileDumped()) { + lprofWrite("Profile data not published: already written.\n"); + return 0; + } + + /* Check if there is llvm/runtime version mismatch. 
*/ + if (GET_VERSION(__llvm_profile_get_version()) != INSTR_PROF_RAW_VERSION) { + lprofWrite("Runtime and instrumentation version mismatch : " + "expected %d, but got %d\n", + INSTR_PROF_RAW_VERSION, + (int)GET_VERSION(__llvm_profile_get_version())); + return -1; + } + + /* Write the profile data into the mapped region. */ + ProfDataWriter VMOWriter; + initVMOWriter(&VMOWriter); + if (lprofWriteData(&VMOWriter, lprofGetVPDataReader(), 0) != 0) + return -1; + + return 0; +} + +COMPILER_RT_VISIBILITY +int __llvm_profile_dump(void) { + int rc = dump(); + lprofSetProfileDumped(); + return rc; +} + +static void dumpWithoutReturn(void) { dump(); } + +/* This method is invoked by the runtime initialization hook + * InstrProfilingRuntime.o if it is linked in. + */ +COMPILER_RT_VISIBILITY +void __llvm_profile_initialize_file(void) {} + +COMPILER_RT_VISIBILITY +int __llvm_profile_register_write_file_atexit(void) { + static bool HasBeenRegistered = false; + + if (HasBeenRegistered) + return 0; + + lprofSetupValueProfiler(); + + HasBeenRegistered = true; + return atexit(dumpWithoutReturn); +} + +#endif diff --git a/lib/profile/InstrProfilingPlatformLinux.c b/lib/profile/InstrProfilingPlatformLinux.c index 89f1ab4cfeda..a517821a2fbd 100644 --- a/lib/profile/InstrProfilingPlatformLinux.c +++ b/lib/profile/InstrProfilingPlatformLinux.c @@ -7,7 +7,7 @@ |* \*===----------------------------------------------------------------------===*/ -#if defined(__linux__) || defined(__FreeBSD__) || \ +#if defined(__linux__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \ (defined(__sun__) && defined(__svr4__)) #include <stdlib.h> diff --git a/lib/profile/InstrProfilingPort.h b/lib/profile/InstrProfilingPort.h index cd1264b0e575..ede6aaaf7886 100644 --- a/lib/profile/InstrProfilingPort.h +++ b/lib/profile/InstrProfilingPort.h @@ -22,12 +22,14 @@ #define COMPILER_RT_ALLOCA _alloca /* Need to include <stdio.h> and <io.h> */ #define COMPILER_RT_FTRUNCATE(f,l) _chsize(_fileno(f),l) +#define 
COMPILER_RT_ALWAYS_INLINE __forceinline #elif __GNUC__ #define COMPILER_RT_ALIGNAS(x) __attribute__((aligned(x))) #define COMPILER_RT_VISIBILITY __attribute__((visibility("hidden"))) #define COMPILER_RT_WEAK __attribute__((weak)) #define COMPILER_RT_ALLOCA __builtin_alloca #define COMPILER_RT_FTRUNCATE(f,l) ftruncate(fileno(f),l) +#define COMPILER_RT_ALWAYS_INLINE inline __attribute((always_inline)) #endif #if defined(__APPLE__) diff --git a/lib/profile/InstrProfilingUtil.c b/lib/profile/InstrProfilingUtil.c index 7f49d0f9fae1..d053ab160ca7 100644 --- a/lib/profile/InstrProfilingUtil.c +++ b/lib/profile/InstrProfilingUtil.c @@ -243,23 +243,21 @@ lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix, COMPILER_RT_VISIBILITY const char * lprofFindFirstDirSeparator(const char *Path) { - const char *Sep; - Sep = strchr(Path, DIR_SEPARATOR); - if (Sep) - return Sep; + const char *Sep = strchr(Path, DIR_SEPARATOR); #if defined(DIR_SEPARATOR_2) - Sep = strchr(Path, DIR_SEPARATOR_2); + const char *Sep2 = strchr(Path, DIR_SEPARATOR_2); + if (Sep2 && (!Sep || Sep2 < Sep)) + Sep = Sep2; #endif return Sep; } COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) { - const char *Sep; - Sep = strrchr(Path, DIR_SEPARATOR); - if (Sep) - return Sep; + const char *Sep = strrchr(Path, DIR_SEPARATOR); #if defined(DIR_SEPARATOR_2) - Sep = strrchr(Path, DIR_SEPARATOR_2); + const char *Sep2 = strrchr(Path, DIR_SEPARATOR_2); + if (Sep2 && (!Sep || Sep2 > Sep)) + Sep = Sep2; #endif return Sep; } diff --git a/lib/profile/InstrProfilingValue.c b/lib/profile/InstrProfilingValue.c index 3db0de8a6723..674a48609878 100644 --- a/lib/profile/InstrProfilingValue.c +++ b/lib/profile/InstrProfilingValue.c @@ -132,13 +132,14 @@ static ValueProfNode *allocateOneNode(__llvm_profile_data *Data, uint32_t Index, return Node; } -COMPILER_RT_VISIBILITY void -__llvm_profile_instrument_target(uint64_t TargetValue, void *Data, - uint32_t CounterIndex) { +static 
COMPILER_RT_ALWAYS_INLINE void +instrumentTargetValueImpl(uint64_t TargetValue, void *Data, + uint32_t CounterIndex, uint64_t CountValue) { __llvm_profile_data *PData = (__llvm_profile_data *)Data; if (!PData) return; - + if (!CountValue) + return; if (!PData->Values) { if (!allocateValueProfileCounters(PData)) return; @@ -153,7 +154,7 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data, uint8_t VDataCount = 0; while (CurVNode) { if (TargetValue == CurVNode->Value) { - CurVNode->Count++; + CurVNode->Count += CountValue; return; } if (CurVNode->Count < MinCount) { @@ -194,11 +195,13 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data, * the runtime can wipe out more than one lowest count entries * to give space for hot targets. */ - if (!MinCountVNode->Count || !(--MinCountVNode->Count)) { + if (MinCountVNode->Count <= CountValue) { CurVNode = MinCountVNode; CurVNode->Value = TargetValue; - CurVNode->Count++; - } + CurVNode->Count = CountValue; + } else + MinCountVNode->Count -= CountValue; + return; } @@ -206,7 +209,7 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data, if (!CurVNode) return; CurVNode->Value = TargetValue; - CurVNode->Count++; + CurVNode->Count += CountValue; uint32_t Success = 0; if (!ValueCounters[CounterIndex]) @@ -221,6 +224,18 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data, } } +COMPILER_RT_VISIBILITY void +__llvm_profile_instrument_target(uint64_t TargetValue, void *Data, + uint32_t CounterIndex) { + instrumentTargetValueImpl(TargetValue, Data, CounterIndex, 1); +} +COMPILER_RT_VISIBILITY void +__llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data, + uint32_t CounterIndex, + uint64_t CountValue) { + instrumentTargetValueImpl(TargetValue, Data, CounterIndex, CountValue); +} + /* * The target values are partitioned into multiple regions/ranges. 
There is one * contiguous region which is precise -- every value in the range is tracked diff --git a/lib/safestack/.clang-format b/lib/safestack/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/safestack/.clang-format +++ b/lib/safestack/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/safestack/safestack.cc b/lib/safestack/safestack.cc index d783cd5a9b29..8af93624b991 100644 --- a/lib/safestack/safestack.cc +++ b/lib/safestack/safestack.cc @@ -171,11 +171,13 @@ static void thread_cleanup_handler(void *_iter) { } } +static void EnsureInterceptorsInitialized(); + /// Intercept thread creation operation to allocate and setup the unsafe stack INTERCEPTOR(int, pthread_create, pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void*), void *arg) { - + EnsureInterceptorsInitialized(); size_t size = 0; size_t guard = 0; @@ -207,6 +209,19 @@ INTERCEPTOR(int, pthread_create, pthread_t *thread, return REAL(pthread_create)(thread, attr, thread_start, tinfo); } +static BlockingMutex interceptor_init_lock(LINKER_INITIALIZED); +static bool interceptors_inited = false; + +static void EnsureInterceptorsInitialized() { + BlockingMutexLock lock(&interceptor_init_lock); + if (interceptors_inited) return; + + // Initialize pthread interceptors for thread allocation + INTERCEPT_FUNCTION(pthread_create); + + interceptors_inited = true; +} + extern "C" __attribute__((visibility("default"))) #if !SANITIZER_CAN_USE_PREINIT_ARRAY // On ELF platforms, the constructor is invoked using .preinit_array (see below) @@ -227,9 +242,6 @@ void __safestack_init() { unsafe_stack_setup(addr, size, guard); pageSize = sysconf(_SC_PAGESIZE); - // Initialize pthread interceptors for thread allocation - INTERCEPT_FUNCTION(pthread_create); - // Setup the cleanup handler pthread_key_create(&thread_cleanup_key, thread_cleanup_handler); } @@ -245,6 +257,16 @@ __attribute__((section(".preinit_array"), #endif extern "C" 
+ __attribute__((visibility("default"))) void *__get_unsafe_stack_bottom() { + return unsafe_stack_start; +} + +extern "C" + __attribute__((visibility("default"))) void *__get_unsafe_stack_top() { + return (char*)unsafe_stack_start + unsafe_stack_size; +} + +extern "C" __attribute__((visibility("default"))) void *__get_unsafe_stack_start() { return unsafe_stack_start; } diff --git a/lib/sanitizer_common/.clang-format b/lib/sanitizer_common/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/sanitizer_common/.clang-format +++ b/lib/sanitizer_common/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt index e0226ae4975f..1be99616e232 100644 --- a/lib/sanitizer_common/CMakeLists.txt +++ b/lib/sanitizer_common/CMakeLists.txt @@ -16,29 +16,24 @@ set(SANITIZER_SOURCES_NOTERMINATION sanitizer_linux.cc sanitizer_linux_s390.cc sanitizer_mac.cc + sanitizer_openbsd.cc sanitizer_persistent_allocator.cc sanitizer_platform_limits_linux.cc sanitizer_platform_limits_netbsd.cc + sanitizer_platform_limits_openbsd.cc sanitizer_platform_limits_posix.cc sanitizer_platform_limits_solaris.cc sanitizer_posix.cc sanitizer_printf.cc sanitizer_procmaps_common.cc - sanitizer_procmaps_freebsd.cc + sanitizer_procmaps_bsd.cc sanitizer_procmaps_linux.cc sanitizer_procmaps_mac.cc sanitizer_procmaps_solaris.cc + sanitizer_rtems.cc sanitizer_solaris.cc - sanitizer_stackdepot.cc - sanitizer_stacktrace.cc - sanitizer_stacktrace_printer.cc sanitizer_stoptheworld_mac.cc sanitizer_suppressions.cc - sanitizer_symbolizer.cc - sanitizer_symbolizer_fuchsia.cc - sanitizer_symbolizer_libbacktrace.cc - sanitizer_symbolizer_mac.cc - sanitizer_symbolizer_win.cc sanitizer_tls_get_addr.cc sanitizer_thread_registry.cc sanitizer_win.cc) @@ -62,45 +57,73 @@ set(SANITIZER_NOLIBC_SOURCES set(SANITIZER_LIBCDEP_SOURCES sanitizer_common_libcdep.cc sanitizer_allocator_checks.cc - 
sancov_flags.cc - sanitizer_coverage_fuchsia.cc - sanitizer_coverage_libcdep_new.cc - sanitizer_coverage_win_sections.cc sanitizer_linux_libcdep.cc sanitizer_mac_libcdep.cc sanitizer_posix_libcdep.cc + sanitizer_stoptheworld_linux_libcdep.cc) + +set(SANITIZER_COVERAGE_SOURCES + sancov_flags.cc + sanitizer_coverage_fuchsia.cc + sanitizer_coverage_libcdep_new.cc + sanitizer_coverage_win_sections.cc) + +set(SANITIZER_SYMBOLIZER_SOURCES + sanitizer_allocator_report.cc + sanitizer_stackdepot.cc + sanitizer_stacktrace.cc sanitizer_stacktrace_libcdep.cc - sanitizer_stoptheworld_linux_libcdep.cc + sanitizer_stacktrace_printer.cc + sanitizer_stacktrace_sparc.cc + sanitizer_symbolizer.cc + sanitizer_symbolizer_libbacktrace.cc sanitizer_symbolizer_libcdep.cc + sanitizer_symbolizer_mac.cc + sanitizer_symbolizer_markup.cc sanitizer_symbolizer_posix_libcdep.cc - sanitizer_unwind_linux_libcdep.cc) + sanitizer_symbolizer_report.cc + sanitizer_symbolizer_win.cc + sanitizer_unwind_linux_libcdep.cc + sanitizer_unwind_win.cc) # Explicitly list all sanitizer_common headers. Not all of these are # included in sanitizer_common source files, but we need to depend on # headers when building our custom unit tests. 
-set(SANITIZER_HEADERS +set(SANITIZER_IMPL_HEADERS + sancov_flags.h + sancov_flags.inc sanitizer_addrhashmap.h sanitizer_allocator.h sanitizer_allocator_bytemap.h + sanitizer_allocator_checks.h sanitizer_allocator_combined.h sanitizer_allocator_interface.h sanitizer_allocator_internal.h sanitizer_allocator_local_cache.h sanitizer_allocator_primary32.h sanitizer_allocator_primary64.h + sanitizer_allocator_report.h sanitizer_allocator_secondary.h sanitizer_allocator_size_class_map.h sanitizer_allocator_stats.h + sanitizer_asm.h sanitizer_atomic.h sanitizer_atomic_clang.h + sanitizer_atomic_clang_mips.h + sanitizer_atomic_clang_other.h + sanitizer_atomic_clang_x86.h sanitizer_atomic_msvc.h sanitizer_bitvector.h sanitizer_bvgraph.h sanitizer_common.h sanitizer_common_interceptors.inc - sanitizer_common_interceptors_ioctl.inc sanitizer_common_interceptors_format.inc + sanitizer_common_interceptors_ioctl.inc + sanitizer_common_interface.inc + sanitizer_common_interface_posix.inc sanitizer_common_syscalls.inc + sanitizer_coverage_interface.inc + sanitizer_dbghelp.h sanitizer_deadlock_detector.h sanitizer_deadlock_detector_interface.h sanitizer_errno.h @@ -109,7 +132,10 @@ set(SANITIZER_HEADERS sanitizer_flag_parser.h sanitizer_flags.h sanitizer_flags.inc + sanitizer_freebsd.h sanitizer_fuchsia.h + sanitizer_getauxval.h + sanitizer_interceptors_ioctl_netbsd.inc sanitizer_interface_internal.h sanitizer_internal_defs.h sanitizer_lfstack.h @@ -118,18 +144,22 @@ set(SANITIZER_HEADERS sanitizer_linux.h sanitizer_list.h sanitizer_mac.h + sanitizer_malloc_mac.inc sanitizer_mutex.h sanitizer_persistent_allocator.h sanitizer_placement_new.h sanitizer_platform.h sanitizer_platform_interceptors.h sanitizer_platform_limits_netbsd.h + sanitizer_platform_limits_openbsd.h sanitizer_platform_limits_posix.h sanitizer_platform_limits_solaris.h sanitizer_posix.h sanitizer_procmaps.h sanitizer_quarantine.h sanitizer_report_decorator.h + sanitizer_rtems.h + sanitizer_signal_interceptors.inc 
sanitizer_stackdepot.h sanitizer_stackdepotbase.h sanitizer_stacktrace.h @@ -137,15 +167,23 @@ set(SANITIZER_HEADERS sanitizer_stoptheworld.h sanitizer_suppressions.h sanitizer_symbolizer.h + sanitizer_symbolizer_fuchsia.h sanitizer_symbolizer_internal.h sanitizer_symbolizer_libbacktrace.h sanitizer_symbolizer_mac.h + sanitizer_symbolizer_rtems.h sanitizer_syscall_generic.inc - sanitizer_syscall_linux_x86_64.inc sanitizer_syscall_linux_aarch64.inc + sanitizer_syscall_linux_arm.inc + sanitizer_syscall_linux_x86_64.inc + sanitizer_syscalls_netbsd.inc sanitizer_thread_registry.h + sanitizer_tls_get_addr.h sanitizer_vector.h - sanitizer_win.h) + sanitizer_win.h + sanitizer_win_defs.h + sanitizer_win_dll_thunk.h + sanitizer_win_weak_interception.h) include_directories(..) @@ -184,24 +222,42 @@ add_compiler_rt_object_libraries(RTSanitizerCommon ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_SOURCES} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTSanitizerCommonNoTermination ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_SOURCES_NOTERMINATION} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTSanitizerCommonNoLibc ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_NOLIBC_SOURCES} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTSanitizerCommonLibc ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_LIBCDEP_SOURCES} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} + CFLAGS ${SANITIZER_CFLAGS} + DEFS ${SANITIZER_COMMON_DEFINITIONS}) +add_compiler_rt_object_libraries(RTSanitizerCommonCoverage + ${OS_OPTION} + ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} + SOURCES ${SANITIZER_COVERAGE_SOURCES} + 
ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} + CFLAGS ${SANITIZER_CFLAGS} + DEFS ${SANITIZER_COMMON_DEFINITIONS}) +add_compiler_rt_object_libraries(RTSanitizerCommonSymbolizer + ${OS_OPTION} + ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} + SOURCES ${SANITIZER_SYMBOLIZER_SOURCES} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} CFLAGS ${SANITIZER_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) @@ -211,12 +267,21 @@ add_compiler_rt_object_libraries(RTSanitizerCommonNoHooks ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_SOURCES} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) add_compiler_rt_object_libraries(RTSanitizerCommonLibcNoHooks ${OS_OPTION} ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} SOURCES ${SANITIZER_LIBCDEP_SOURCES} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} + CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS} + DEFS ${SANITIZER_COMMON_DEFINITIONS}) +add_compiler_rt_object_libraries(RTSanitizerCommonSymbolizerNoHooks + ${OS_OPTION} + ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} + SOURCES ${SANITIZER_SYMBOLIZER_SOURCES} + ADDITIONAL_HEADERS ${SANITIZER_IMPL_HEADERS} CFLAGS ${SANITIZER_NO_WEAK_HOOKS_CFLAGS} DEFS ${SANITIZER_COMMON_DEFINITIONS}) diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc index fc4f7a75ae34..6bfd5e5eea50 100644 --- a/lib/sanitizer_common/sanitizer_allocator.cc +++ b/lib/sanitizer_common/sanitizer_allocator.cc @@ -21,6 +21,10 @@ namespace __sanitizer { +// Default allocator names. +const char *PrimaryAllocatorName = "SizeClassAllocator"; +const char *SecondaryAllocatorName = "LargeMmapAllocator"; + // ThreadSanitizer for Go uses libc malloc/free. 
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC) # if SANITIZER_LINUX && !SANITIZER_ANDROID @@ -136,12 +140,19 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull; +static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) { + SetAllocatorOutOfMemory(); + Report("FATAL: %s: internal allocator is out of memory trying to allocate " + "0x%zx bytes\n", SanitizerToolName, requested_size); + Die(); +} + void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) { if (size + sizeof(u64) < size) return nullptr; void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment); - if (!p) - return nullptr; + if (UNLIKELY(!p)) + ReportInternalAllocatorOutOfMemory(size + sizeof(u64)); ((u64*)p)[0] = kBlockMagic; return (char*)p + sizeof(u64); } @@ -155,16 +166,21 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) { size = size + sizeof(u64); CHECK_EQ(kBlockMagic, ((u64*)addr)[0]); void *p = RawInternalRealloc(addr, size, cache); - if (!p) - return nullptr; + if (UNLIKELY(!p)) + ReportInternalAllocatorOutOfMemory(size); return (char*)p + sizeof(u64); } void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) { - if (UNLIKELY(CheckForCallocOverflow(count, size))) - return InternalAllocator::FailureHandler::OnBadRequest(); + if (UNLIKELY(CheckForCallocOverflow(count, size))) { + Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) " + "cannot be represented in type size_t\n", SanitizerToolName, count, + size); + Die(); + } void *p = InternalAlloc(count * size, cache); - if (p) internal_memset(p, 0, count * size); + if (LIKELY(p)) + internal_memset(p, 0, count * size); return p; } @@ -210,6 +226,8 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) { low_level_alloc_callback = callback; } +// Allocator's OOM and other errors handling support. 
+ static atomic_uint8_t allocator_out_of_memory = {0}; static atomic_uint8_t allocator_may_return_null = {0}; @@ -217,13 +235,8 @@ bool IsAllocatorOutOfMemory() { return atomic_load_relaxed(&allocator_out_of_memory); } -// Prints error message and kills the program. -void NORETURN ReportAllocatorCannotReturnNull() { - Report("%s's allocator is terminating the process instead of returning 0\n", - SanitizerToolName); - Report("If you don't like this behavior set allocator_may_return_null=1\n"); - CHECK(0); - Die(); +void SetAllocatorOutOfMemory() { + atomic_store_relaxed(&allocator_out_of_memory, 1); } bool AllocatorMayReturnNull() { @@ -235,26 +248,9 @@ void SetAllocatorMayReturnNull(bool may_return_null) { memory_order_relaxed); } -void *ReturnNullOrDieOnFailure::OnBadRequest() { - if (AllocatorMayReturnNull()) - return nullptr; - ReportAllocatorCannotReturnNull(); -} - -void *ReturnNullOrDieOnFailure::OnOOM() { - atomic_store_relaxed(&allocator_out_of_memory, 1); - if (AllocatorMayReturnNull()) - return nullptr; - ReportAllocatorCannotReturnNull(); -} - -void NORETURN *DieOnFailure::OnBadRequest() { - ReportAllocatorCannotReturnNull(); -} - -void NORETURN *DieOnFailure::OnOOM() { - atomic_store_relaxed(&allocator_out_of_memory, 1); - ReportAllocatorCannotReturnNull(); +void PrintHintAllocatorCannotReturnNull() { + Report("HINT: if you don't care about these errors you may set " + "allocator_may_return_null=1\n"); } } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h index 38368361de6f..9655a2264f34 100644 --- a/lib/sanitizer_common/sanitizer_allocator.h +++ b/lib/sanitizer_common/sanitizer_allocator.h @@ -24,28 +24,23 @@ namespace __sanitizer { +// Allows the tools to name their allocations appropriately. 
+extern const char *PrimaryAllocatorName; +extern const char *SecondaryAllocatorName; + // Since flags are immutable and allocator behavior can be changed at runtime // (unit tests or ASan on Android are some examples), allocator_may_return_null // flag value is cached here and can be altered later. bool AllocatorMayReturnNull(); void SetAllocatorMayReturnNull(bool may_return_null); -// Allocator failure handling policies: -// Implements AllocatorMayReturnNull policy, returns null when the flag is set, -// dies otherwise. -struct ReturnNullOrDieOnFailure { - static void *OnBadRequest(); - static void *OnOOM(); -}; -// Always dies on the failure. -struct DieOnFailure { - static void NORETURN *OnBadRequest(); - static void NORETURN *OnOOM(); -}; - // Returns true if allocator detected OOM condition. Can be used to avoid memory -// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called. +// hungry operations. bool IsAllocatorOutOfMemory(); +// Should be called by a particular allocator when OOM is detected. +void SetAllocatorOutOfMemory(); + +void PrintHintAllocatorCannotReturnNull(); // Allocators call these callbacks on mmap/munmap. 
struct NoOpMapUnmapCallback { @@ -65,8 +60,10 @@ INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n) template<typename T> INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) { if (n <= 1) return; + u32 state = *rand_state; for (u32 i = n - 1; i > 0; i--) - Swap(a[i], a[RandN(rand_state, i + 1)]); + Swap(a[i], a[RandN(&state, i + 1)]); + *rand_state = state; } #include "sanitizer_allocator_size_class_map.h" diff --git a/lib/sanitizer_common/sanitizer_allocator_bytemap.h b/lib/sanitizer_common/sanitizer_allocator_bytemap.h index 92472cdf5150..7df3e4097bf2 100644 --- a/lib/sanitizer_common/sanitizer_allocator_bytemap.h +++ b/lib/sanitizer_common/sanitizer_allocator_bytemap.h @@ -18,7 +18,7 @@ template<u64 kSize> class FlatByteMap { public: - void TestOnlyInit() { + void Init() { internal_memset(map_, 0, sizeof(map_)); } @@ -44,7 +44,7 @@ class FlatByteMap { template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback> class TwoLevelByteMap { public: - void TestOnlyInit() { + void Init() { internal_memset(map1_, 0, sizeof(map1_)); mu_.Init(); } diff --git a/lib/sanitizer_common/sanitizer_allocator_checks.h b/lib/sanitizer_common/sanitizer_allocator_checks.h index b61a8b2eb3b6..61bd26e68cc6 100644 --- a/lib/sanitizer_common/sanitizer_allocator_checks.h +++ b/lib/sanitizer_common/sanitizer_allocator_checks.h @@ -44,16 +44,18 @@ INLINE void *SetErrnoOnNull(void *ptr) { // of alignment. INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) { #if SANITIZER_POSIX - return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0; + return alignment != 0 && IsPowerOfTwo(alignment) && + (size & (alignment - 1)) == 0; #else - return size % alignment == 0; + return alignment != 0 && size % alignment == 0; #endif } // Checks posix_memalign() parameters, verifies that alignment is a power of two // and a multiple of sizeof(void *). 
INLINE bool CheckPosixMemalignAlignment(uptr alignment) { - return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; // NOLINT + return alignment != 0 && IsPowerOfTwo(alignment) && + (alignment % sizeof(void *)) == 0; // NOLINT } // Returns true if calloc(size, n) call overflows on size*n calculation. diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h index 0d8a2a174bb5..1f874d60b92b 100644 --- a/lib/sanitizer_common/sanitizer_allocator_combined.h +++ b/lib/sanitizer_common/sanitizer_allocator_combined.h @@ -24,8 +24,6 @@ template <class PrimaryAllocator, class AllocatorCache, class SecondaryAllocator> // NOLINT class CombinedAllocator { public: - typedef typename SecondaryAllocator::FailureHandler FailureHandler; - void InitLinkerInitialized(s32 release_to_os_interval_ms) { primary_.Init(release_to_os_interval_ms); secondary_.InitLinkerInitialized(); @@ -42,8 +40,12 @@ class CombinedAllocator { // Returning 0 on malloc(0) may break a lot of code. 
if (size == 0) size = 1; - if (size + alignment < size) - return FailureHandler::OnBadRequest(); + if (size + alignment < size) { + Report("WARNING: %s: CombinedAllocator allocation overflow: " + "0x%zx bytes with 0x%zx alignment requested\n", + SanitizerToolName, size, alignment); + return nullptr; + } uptr original_size = size; // If alignment requirements are to be fulfilled by the frontend allocator // rather than by the primary or secondary, passing an alignment lower than @@ -62,8 +64,6 @@ class CombinedAllocator { res = cache->Allocate(&primary_, primary_.ClassID(size)); else res = secondary_.Allocate(&stats_, original_size, alignment); - if (!res) - return FailureHandler::OnOOM(); if (alignment > 8) CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); return res; diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h index a791d0d94894..c0c03d3f4345 100644 --- a/lib/sanitizer_common/sanitizer_allocator_internal.h +++ b/lib/sanitizer_common/sanitizer_allocator_internal.h @@ -46,9 +46,12 @@ typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator; typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator> InternalAllocatorCache; +typedef LargeMmapAllocator<NoOpMapUnmapCallback, + LargeMmapAllocatorPtrArrayStatic> + SecondaryInternalAllocator; + typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache, - LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> - > InternalAllocator; + SecondaryInternalAllocator> InternalAllocator; void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr, uptr alignment = 0); @@ -59,15 +62,6 @@ void *InternalCalloc(uptr countr, uptr size, void InternalFree(void *p, InternalAllocatorCache *cache = nullptr); InternalAllocator *internal_allocator(); -enum InternalAllocEnum { - INTERNAL_ALLOC -}; - } // namespace __sanitizer -inline void *operator new(__sanitizer::operator_new_size_type size, - __sanitizer::InternalAllocEnum) 
{ - return __sanitizer::InternalAlloc(size); -} - #endif // SANITIZER_ALLOCATOR_INTERNAL_H diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h index 1b3c2c0c19d1..1bb8fc27defa 100644 --- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h +++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h @@ -19,8 +19,7 @@ // object per thread in TLS, is has to be POD. template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache - : SizeClassAllocator::AllocatorCache { -}; + : SizeClassAllocator::AllocatorCache {}; // Cache used by SizeClassAllocator64. template <class SizeClassAllocator> @@ -46,13 +45,12 @@ struct SizeClassAllocator64LocalCache { if (UNLIKELY(c->count == 0)) { if (UNLIKELY(!Refill(c, allocator, class_id))) return nullptr; + DCHECK_GT(c->count, 0); } - stats_.Add(AllocatorStatAllocated, c->class_size); - CHECK_GT(c->count, 0); CompactPtrT chunk = c->chunks[--c->count]; - void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer( + stats_.Add(AllocatorStatAllocated, c->class_size); + return reinterpret_cast<void *>(allocator->CompactPtrToPointer( allocator->GetRegionBeginBySizeClass(class_id), chunk)); - return res; } void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) { @@ -60,20 +58,19 @@ struct SizeClassAllocator64LocalCache { CHECK_LT(class_id, kNumClasses); // If the first allocator call on a new thread is a deallocation, then // max_count will be zero, leading to check failure. 
- InitCache(); PerClass *c = &per_class_[class_id]; - stats_.Sub(AllocatorStatAllocated, c->class_size); - CHECK_NE(c->max_count, 0UL); + InitCache(c); if (UNLIKELY(c->count == c->max_count)) Drain(c, allocator, class_id, c->max_count / 2); CompactPtrT chunk = allocator->PointerToCompactPtr( allocator->GetRegionBeginBySizeClass(class_id), reinterpret_cast<uptr>(p)); c->chunks[c->count++] = chunk; + stats_.Sub(AllocatorStatAllocated, c->class_size); } void Drain(SizeClassAllocator *allocator) { - for (uptr i = 0; i < kNumClasses; i++) { + for (uptr i = 1; i < kNumClasses; i++) { PerClass *c = &per_class_[i]; while (c->count > 0) Drain(c, allocator, i, c->count); @@ -94,20 +91,22 @@ struct SizeClassAllocator64LocalCache { PerClass per_class_[kNumClasses]; AllocatorStats stats_; - void InitCache() { - if (LIKELY(per_class_[1].max_count)) + void InitCache(PerClass *c) { + if (LIKELY(c->max_count)) return; - for (uptr i = 0; i < kNumClasses; i++) { + for (uptr i = 1; i < kNumClasses; i++) { PerClass *c = &per_class_[i]; - c->max_count = 2 * SizeClassMap::MaxCachedHint(i); - c->class_size = Allocator::ClassIdToSize(i); + const uptr size = Allocator::ClassIdToSize(i); + c->max_count = 2 * SizeClassMap::MaxCachedHint(size); + c->class_size = size; } + DCHECK_NE(c->max_count, 0UL); } NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator, uptr class_id) { - InitCache(); - uptr num_requested_chunks = c->max_count / 2; + InitCache(c); + const uptr num_requested_chunks = c->max_count / 2; if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks, num_requested_chunks))) return false; @@ -117,9 +116,8 @@ struct SizeClassAllocator64LocalCache { NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id, uptr count) { - InitCache(); CHECK_GE(c->count, count); - uptr first_idx_to_drain = c->count - count; + const uptr first_idx_to_drain = c->count - count; c->count -= count; allocator->ReturnToAllocator(&stats_, class_id, 
&c->chunks[first_idx_to_drain], count); @@ -164,12 +162,13 @@ struct SizeClassAllocator32LocalCache { CHECK_LT(class_id, kNumClasses); PerClass *c = &per_class_[class_id]; if (UNLIKELY(c->count == 0)) { - if (UNLIKELY(!Refill(allocator, class_id))) + if (UNLIKELY(!Refill(c, allocator, class_id))) return nullptr; + DCHECK_GT(c->count, 0); } - stats_.Add(AllocatorStatAllocated, c->class_size); void *res = c->batch[--c->count]; PREFETCH(c->batch[c->count - 1]); + stats_.Add(AllocatorStatAllocated, c->class_size); return res; } @@ -178,20 +177,19 @@ struct SizeClassAllocator32LocalCache { CHECK_LT(class_id, kNumClasses); // If the first allocator call on a new thread is a deallocation, then // max_count will be zero, leading to check failure. - InitCache(); PerClass *c = &per_class_[class_id]; - stats_.Sub(AllocatorStatAllocated, c->class_size); - CHECK_NE(c->max_count, 0UL); + InitCache(c); if (UNLIKELY(c->count == c->max_count)) - Drain(allocator, class_id); + Drain(c, allocator, class_id); c->batch[c->count++] = p; + stats_.Sub(AllocatorStatAllocated, c->class_size); } void Drain(SizeClassAllocator *allocator) { - for (uptr i = 0; i < kNumClasses; i++) { + for (uptr i = 1; i < kNumClasses; i++) { PerClass *c = &per_class_[i]; while (c->count > 0) - Drain(allocator, i); + Drain(c, allocator, i); } } @@ -216,15 +214,16 @@ struct SizeClassAllocator32LocalCache { PerClass per_class_[kNumClasses]; AllocatorStats stats_; - void InitCache() { - if (LIKELY(per_class_[1].max_count)) + void InitCache(PerClass *c) { + if (LIKELY(c->max_count)) return; const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch)); - for (uptr i = 0; i < kNumClasses; i++) { + for (uptr i = 1; i < kNumClasses; i++) { PerClass *c = &per_class_[i]; - uptr max_cached = TransferBatch::MaxCached(i); + const uptr size = Allocator::ClassIdToSize(i); + const uptr max_cached = TransferBatch::MaxCached(size); c->max_count = 2 * max_cached; - c->class_size = Allocator::ClassIdToSize(i); + 
c->class_size = size; // Precompute the class id to use to store batches for the current class // id. 0 means the class size is large enough to store a batch within one // of the chunks. If using a separate size class, it will always be @@ -232,16 +231,17 @@ struct SizeClassAllocator32LocalCache { if (kUseSeparateSizeClassForBatch) { c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID; } else { - c->batch_class_id = (c->class_size < + c->batch_class_id = (size < TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ? batch_class_id : 0; } } + DCHECK_NE(c->max_count, 0UL); } - NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) { - InitCache(); - PerClass *c = &per_class_[class_id]; + NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator, + uptr class_id) { + InitCache(c); TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id); if (UNLIKELY(!b)) return false; @@ -252,21 +252,21 @@ struct SizeClassAllocator32LocalCache { return true; } - NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) { - InitCache(); - PerClass *c = &per_class_[class_id]; - uptr cnt = Min(c->max_count / 2, c->count); - uptr first_idx_to_drain = c->count - cnt; + NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, + uptr class_id) { + const uptr count = Min(c->max_count / 2, c->count); + const uptr first_idx_to_drain = c->count - count; TransferBatch *b = CreateBatch( class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]); // Failure to allocate a batch while releasing memory is non recoverable. // TODO(alekseys): Figure out how to do it without allocating a new batch. 
- if (UNLIKELY(!b)) - DieOnFailure::OnOOM(); - b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id), - &c->batch[first_idx_to_drain], cnt); - c->count -= cnt; + if (UNLIKELY(!b)) { + Report("FATAL: Internal error: %s's allocator failed to allocate a " + "transfer batch.\n", SanitizerToolName); + Die(); + } + b->SetFromArray(&c->batch[first_idx_to_drain], count); + c->count -= count; allocator->DeallocateBatch(&stats_, class_id, b); } }; - diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h index 6c682aff64a2..67970e95b31e 100644 --- a/lib/sanitizer_common/sanitizer_allocator_primary32.h +++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h @@ -63,9 +63,9 @@ class SizeClassAllocator32 { struct TransferBatch { static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2; - void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) { + void SetFromArray(void *batch[], uptr count) { + DCHECK_LE(count, kMaxNumCached); count_ = count; - CHECK_LE(count_, kMaxNumCached); for (uptr i = 0; i < count; i++) batch_[i] = batch[i]; } @@ -73,9 +73,9 @@ class SizeClassAllocator32 { void Clear() { count_ = 0; } void Add(void *ptr) { batch_[count_++] = ptr; - CHECK_LE(count_, kMaxNumCached); + DCHECK_LE(count_, kMaxNumCached); } - void CopyToArray(void *to_batch[]) { + void CopyToArray(void *to_batch[]) const { for (uptr i = 0, n = Count(); i < n; i++) to_batch[i] = batch_[i]; } @@ -84,8 +84,8 @@ class SizeClassAllocator32 { static uptr AllocationSizeRequiredForNElements(uptr n) { return sizeof(uptr) * 2 + sizeof(void *) * n; } - static uptr MaxCached(uptr class_id) { - return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id)); + static uptr MaxCached(uptr size) { + return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size)); } TransferBatch *next; @@ -108,7 +108,7 @@ class SizeClassAllocator32 { typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache; void Init(s32 
release_to_os_interval_ms) { - possible_regions.TestOnlyInit(); + possible_regions.Init(); internal_memset(size_class_info_array, 0, sizeof(size_class_info_array)); } @@ -125,7 +125,7 @@ class SizeClassAllocator32 { } void *MapWithCallback(uptr size) { - void *res = MmapOrDie(size, "SizeClassAllocator32"); + void *res = MmapOrDie(size, PrimaryAllocatorName); MapUnmapCallback().OnMap((uptr)res, size); return res; } @@ -153,13 +153,14 @@ class SizeClassAllocator32 { NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c, uptr class_id) { - CHECK_LT(class_id, kNumClasses); + DCHECK_LT(class_id, kNumClasses); SizeClassInfo *sci = GetSizeClassInfo(class_id); SpinMutexLock l(&sci->mutex); - if (sci->free_list.empty() && - UNLIKELY(!PopulateFreeList(stat, c, sci, class_id))) - return nullptr; - CHECK(!sci->free_list.empty()); + if (sci->free_list.empty()) { + if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id))) + return nullptr; + DCHECK(!sci->free_list.empty()); + } TransferBatch *b = sci->free_list.front(); sci->free_list.pop_front(); return b; @@ -167,15 +168,13 @@ class SizeClassAllocator32 { NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, TransferBatch *b) { - CHECK_LT(class_id, kNumClasses); + DCHECK_LT(class_id, kNumClasses); CHECK_GT(b->Count(), 0); SizeClassInfo *sci = GetSizeClassInfo(class_id); SpinMutexLock l(&sci->mutex); sci->free_list.push_front(b); } - uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; } - bool PointerIsMine(const void *p) { uptr mem = reinterpret_cast<uptr>(p); if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize) @@ -251,12 +250,9 @@ class SizeClassAllocator32 { } } - void PrintStats() { - } + void PrintStats() {} - static uptr AdditionalSize() { - return 0; - } + static uptr AdditionalSize() { return 0; } typedef SizeClassMap SizeClassMapT; static const uptr kNumClasses = SizeClassMap::kNumClasses; @@ -265,17 +261,15 @@ class SizeClassAllocator32 { static const uptr kRegionSize = 1 << 
kRegionSizeLog; static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize; - struct SizeClassInfo { - SpinMutex mutex; + struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo { + StaticSpinMutex mutex; IntrusiveList<TransferBatch> free_list; u32 rand_state; - char padding[kCacheLineSize - 2 * sizeof(uptr) - - sizeof(IntrusiveList<TransferBatch>)]; }; - COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize); + COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0); uptr ComputeRegionId(uptr mem) { - uptr res = mem >> kRegionSizeLog; + const uptr res = mem >> kRegionSizeLog; CHECK_LT(res, kNumPossibleRegions); return res; } @@ -285,9 +279,9 @@ class SizeClassAllocator32 { } uptr AllocateRegion(AllocatorStats *stat, uptr class_id) { - CHECK_LT(class_id, kNumClasses); - uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError( - kRegionSize, kRegionSize, "SizeClassAllocator32")); + DCHECK_LT(class_id, kNumClasses); + const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError( + kRegionSize, kRegionSize, PrimaryAllocatorName)); if (UNLIKELY(!res)) return 0; MapUnmapCallback().OnMap(res, kRegionSize); @@ -298,7 +292,7 @@ class SizeClassAllocator32 { } SizeClassInfo *GetSizeClassInfo(uptr class_id) { - CHECK_LT(class_id, kNumClasses); + DCHECK_LT(class_id, kNumClasses); return &size_class_info_array[class_id]; } @@ -329,22 +323,22 @@ class SizeClassAllocator32 { bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c, SizeClassInfo *sci, uptr class_id) { - uptr size = ClassIdToSize(class_id); - uptr reg = AllocateRegion(stat, class_id); - if (UNLIKELY(!reg)) + const uptr region = AllocateRegion(stat, class_id); + if (UNLIKELY(!region)) return false; if (kRandomShuffleChunks) if (UNLIKELY(sci->rand_state == 0)) // The random state is initialized from ASLR (PIE) and time. 
sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime(); - uptr n_chunks = kRegionSize / (size + kMetadataSize); - uptr max_count = TransferBatch::MaxCached(class_id); - CHECK_GT(max_count, 0); + const uptr size = ClassIdToSize(class_id); + const uptr n_chunks = kRegionSize / (size + kMetadataSize); + const uptr max_count = TransferBatch::MaxCached(size); + DCHECK_GT(max_count, 0); TransferBatch *b = nullptr; - const uptr kShuffleArraySize = 48; + constexpr uptr kShuffleArraySize = 48; uptr shuffle_array[kShuffleArraySize]; uptr count = 0; - for (uptr i = reg; i < reg + n_chunks * size; i += size) { + for (uptr i = region; i < region + n_chunks * size; i += size) { shuffle_array[count++] = i; if (count == kShuffleArraySize) { if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count, diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h index 651a64b04f2e..6acb4f8bc56d 100644 --- a/lib/sanitizer_common/sanitizer_allocator_primary64.h +++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h @@ -72,14 +72,17 @@ class SizeClassAllocator64 { void Init(s32 release_to_os_interval_ms) { uptr TotalSpaceSize = kSpaceSize + AdditionalSize(); if (kUsingConstantSpaceBeg) { - CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize, AllocatorName(), - kSpaceBeg)); + CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize, + PrimaryAllocatorName, kSpaceBeg)); } else { - NonConstSpaceBeg = address_range.Init(TotalSpaceSize, AllocatorName()); + NonConstSpaceBeg = address_range.Init(TotalSpaceSize, + PrimaryAllocatorName); CHECK_NE(NonConstSpaceBeg, ~(uptr)0); } SetReleaseToOSIntervalMs(release_to_os_interval_ms); MapWithCallbackOrDie(SpaceEnd(), AdditionalSize()); + // Check that the RegionInfo array is aligned on the CacheLine size. 
+ DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0); } s32 ReleaseToOSIntervalMs() const { @@ -115,8 +118,12 @@ class SizeClassAllocator64 { // Failure to allocate free array space while releasing memory is non // recoverable. if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, - new_num_freed_chunks))) - DieOnFailure::OnOOM(); + new_num_freed_chunks))) { + Report("FATAL: Internal error: %s's allocator exhausted the free list " + "space for size class %zd (%zd bytes).\n", SanitizerToolName, + class_id, ClassIdToSize(class_id)); + Die(); + } for (uptr i = 0; i < n_chunks; i++) free_array[old_num_chunks + i] = chunks[i]; region->num_freed_chunks = new_num_freed_chunks; @@ -544,7 +551,6 @@ class SizeClassAllocator64 { friend class MemoryMapper; ReservedAddressRange address_range; - static const char *AllocatorName() { return "sanitizer_allocator"; } static const uptr kRegionSize = kSpaceSize / kNumClassesRounded; // FreeArray is the array of free-d chunks (stored as 4-byte offsets). @@ -584,7 +590,7 @@ class SizeClassAllocator64 { u64 last_released_bytes; }; - struct RegionInfo { + struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo { BlockingMutex mutex; uptr num_freed_chunks; // Number of elements in the freearray. uptr mapped_free_array; // Bytes mapped for freearray. 
@@ -597,12 +603,11 @@ class SizeClassAllocator64 { Stats stats; ReleaseToOsInfo rtoi; }; - COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize); + COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0); RegionInfo *GetRegionInfo(uptr class_id) const { - CHECK_LT(class_id, kNumClasses); - RegionInfo *regions = - reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize); + DCHECK_LT(class_id, kNumClasses); + RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd()); return ®ions[class_id]; } diff --git a/lib/sanitizer_common/sanitizer_allocator_report.cc b/lib/sanitizer_common/sanitizer_allocator_report.cc new file mode 100644 index 000000000000..e93f90c2a4b7 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_allocator_report.cc @@ -0,0 +1,125 @@ +//===-- sanitizer_allocator_report.cc ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc. 
+/// +//===----------------------------------------------------------------------===// + +#include "sanitizer_allocator.h" +#include "sanitizer_allocator_report.h" +#include "sanitizer_common.h" +#include "sanitizer_report_decorator.h" + +namespace __sanitizer { + +class ScopedAllocatorErrorReport { + public: + ScopedAllocatorErrorReport(const char *error_summary_, + const StackTrace *stack_) + : error_summary(error_summary_), + stack(stack_) { + Printf("%s", d.Error()); + } + ~ScopedAllocatorErrorReport() { + Printf("%s", d.Default()); + stack->Print(); + PrintHintAllocatorCannotReturnNull(); + ReportErrorSummary(error_summary, stack); + } + + private: + ScopedErrorReportLock lock; + const char *error_summary; + const StackTrace* const stack; + const SanitizerCommonDecorator d; +}; + +void NORETURN ReportCallocOverflow(uptr count, uptr size, + const StackTrace *stack) { + { + ScopedAllocatorErrorReport report("calloc-overflow", stack); + Report("ERROR: %s: calloc parameters overflow: count * size (%zd * %zd) " + "cannot be represented in type size_t\n", SanitizerToolName, count, + size); + } + Die(); +} + +void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack) { + { + ScopedAllocatorErrorReport report("pvalloc-overflow", stack); + Report("ERROR: %s: pvalloc parameters overflow: size 0x%zx rounded up to " + "system page size 0x%zx cannot be represented in type size_t\n", + SanitizerToolName, size, GetPageSizeCached()); + } + Die(); +} + +void NORETURN ReportInvalidAllocationAlignment(uptr alignment, + const StackTrace *stack) { + { + ScopedAllocatorErrorReport report("invalid-allocation-alignment", stack); + Report("ERROR: %s: invalid allocation alignment: %zd, alignment must be a " + "power of two\n", SanitizerToolName, alignment); + } + Die(); +} + +void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment, + const StackTrace *stack) { + { + ScopedAllocatorErrorReport report("invalid-aligned-alloc-alignment", stack); +#if 
SANITIZER_POSIX + Report("ERROR: %s: invalid alignment requested in " + "aligned_alloc: %zd, alignment must be a power of two and the " + "requested size 0x%zx must be a multiple of alignment\n", + SanitizerToolName, alignment, size); +#else + Report("ERROR: %s: invalid alignment requested in aligned_alloc: %zd, " + "the requested size 0x%zx must be a multiple of alignment\n", + SanitizerToolName, alignment, size); +#endif + } + Die(); +} + +void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment, + const StackTrace *stack) { + { + ScopedAllocatorErrorReport report("invalid-posix-memalign-alignment", + stack); + Report("ERROR: %s: invalid alignment requested in " + "posix_memalign: %zd, alignment must be a power of two and a " + "multiple of sizeof(void*) == %zd\n", SanitizerToolName, alignment, + sizeof(void*)); // NOLINT + } + Die(); +} + +void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size, + const StackTrace *stack) { + { + ScopedAllocatorErrorReport report("allocation-size-too-big", stack); + Report("ERROR: %s: requested allocation size 0x%zx exceeds maximum " + "supported size of 0x%zx\n", SanitizerToolName, user_size, max_size); + } + Die(); +} + +void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) { + { + ScopedAllocatorErrorReport report("out-of-memory", stack); + Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx " + "bytes\n", SanitizerToolName, requested_size); + } + Die(); +} + +} // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_allocator_report.h b/lib/sanitizer_common/sanitizer_allocator_report.h new file mode 100644 index 000000000000..b19b22fa5359 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_allocator_report.h @@ -0,0 +1,38 @@ +//===-- sanitizer_allocator_report.h ----------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc. +/// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_ALLOCATOR_REPORT_H +#define SANITIZER_ALLOCATOR_REPORT_H + +#include "sanitizer_internal_defs.h" +#include "sanitizer_stacktrace.h" + +namespace __sanitizer { + +void NORETURN ReportCallocOverflow(uptr count, uptr size, + const StackTrace *stack); +void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack); +void NORETURN ReportInvalidAllocationAlignment(uptr alignment, + const StackTrace *stack); +void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment, + const StackTrace *stack); +void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment, + const StackTrace *stack); +void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size, + const StackTrace *stack); +void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack); + +} // namespace __sanitizer + +#endif // SANITIZER_ALLOCATOR_REPORT_H diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h index 261dfb5e1a28..ab680b5e2d11 100644 --- a/lib/sanitizer_common/sanitizer_allocator_secondary.h +++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h @@ -14,17 +14,66 @@ #error This file must be included inside sanitizer_allocator.h #endif +// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total +// allocated chunks. To be used in memory constrained or not memory hungry cases +// (currently, 32 bits and internal allocator). 
+class LargeMmapAllocatorPtrArrayStatic { + public: + INLINE void *Init() { return &p_[0]; } + INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); } + private: + static const int kMaxNumChunks = 1 << 15; + uptr p_[kMaxNumChunks]; +}; + +// Much less restricted LargeMmapAllocator chunks list (comparing to +// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks. +// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the +// same functionality in Fuchsia case, which does not support MAP_NORESERVE. +class LargeMmapAllocatorPtrArrayDynamic { + public: + INLINE void *Init() { + uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr), + SecondaryAllocatorName); + CHECK(p); + return reinterpret_cast<void*>(p); + } + + INLINE void EnsureSpace(uptr n) { + CHECK_LT(n, kMaxNumChunks); + DCHECK(n <= n_reserved_); + if (UNLIKELY(n == n_reserved_)) { + address_range_.MapOrDie( + reinterpret_cast<uptr>(address_range_.base()) + + n_reserved_ * sizeof(uptr), + kChunksBlockCount * sizeof(uptr)); + n_reserved_ += kChunksBlockCount; + } + } + + private: + static const int kMaxNumChunks = 1 << 20; + static const int kChunksBlockCount = 1 << 14; + ReservedAddressRange address_range_; + uptr n_reserved_; +}; + +#if SANITIZER_WORDSIZE == 32 +typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray; +#else +typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray; +#endif + // This class can (de)allocate only large chunks of memory using mmap/unmap. // The main purpose of this allocator is to cover large and rare allocation // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64). 
template <class MapUnmapCallback = NoOpMapUnmapCallback, - class FailureHandlerT = ReturnNullOrDieOnFailure> + class PtrArrayT = DefaultLargeMmapAllocatorPtrArray> class LargeMmapAllocator { public: - typedef FailureHandlerT FailureHandler; - void InitLinkerInitialized() { page_size_ = GetPageSizeCached(); + chunks_ = reinterpret_cast<Header**>(ptr_array_.Init()); } void Init() { @@ -38,12 +87,16 @@ class LargeMmapAllocator { if (alignment > page_size_) map_size += alignment; // Overflow. - if (map_size < size) - return FailureHandler::OnBadRequest(); + if (map_size < size) { + Report("WARNING: %s: LargeMmapAllocator allocation overflow: " + "0x%zx bytes with 0x%zx alignment requested\n", + SanitizerToolName, map_size, alignment); + return nullptr; + } uptr map_beg = reinterpret_cast<uptr>( - MmapOrDieOnFatalError(map_size, "LargeMmapAllocator")); + MmapOrDieOnFatalError(map_size, SecondaryAllocatorName)); if (!map_beg) - return FailureHandler::OnOOM(); + return nullptr; CHECK(IsAligned(map_beg, page_size_)); MapUnmapCallback().OnMap(map_beg, map_size); uptr map_end = map_beg + map_size; @@ -62,11 +115,11 @@ class LargeMmapAllocator { CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log)); { SpinMutexLock l(&mutex_); + ptr_array_.EnsureSpace(n_chunks_); uptr idx = n_chunks_++; - chunks_sorted_ = false; - CHECK_LT(idx, kMaxNumChunks); h->chunk_idx = idx; chunks_[idx] = h; + chunks_sorted_ = false; stats.n_allocs++; stats.currently_allocated += map_size; stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated); @@ -84,9 +137,8 @@ class LargeMmapAllocator { uptr idx = h->chunk_idx; CHECK_EQ(chunks_[idx], h); CHECK_LT(idx, n_chunks_); - chunks_[idx] = chunks_[n_chunks_ - 1]; + chunks_[idx] = chunks_[--n_chunks_]; chunks_[idx]->chunk_idx = idx; - n_chunks_--; chunks_sorted_ = false; stats.n_frees++; stats.currently_allocated -= h->map_size; @@ -150,7 +202,7 @@ class LargeMmapAllocator { void EnsureSortedChunks() { if (chunks_sorted_) return; - 
SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_); + Sort(reinterpret_cast<uptr *>(chunks_), n_chunks_); for (uptr i = 0; i < n_chunks_; i++) chunks_[i]->chunk_idx = i; chunks_sorted_ = true; @@ -222,7 +274,7 @@ class LargeMmapAllocator { EnsureSortedChunks(); // Avoid doing the sort while iterating. for (uptr i = 0; i < n_chunks_; i++) { auto t = chunks_[i]; - callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg); + callback(reinterpret_cast<uptr>(GetUser(t)), arg); // Consistency check: verify that the array did not change. CHECK_EQ(chunks_[i], t); CHECK_EQ(chunks_[i]->chunk_idx, i); @@ -230,7 +282,6 @@ class LargeMmapAllocator { } private: - static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18); struct Header { uptr map_beg; uptr map_size; @@ -256,13 +307,13 @@ class LargeMmapAllocator { } uptr page_size_; - Header *chunks_[kMaxNumChunks]; + Header **chunks_; + PtrArrayT ptr_array_; uptr n_chunks_; bool chunks_sorted_; struct Stats { uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64]; } stats; - SpinMutex mutex_; + StaticSpinMutex mutex_; }; - diff --git a/lib/sanitizer_common/sanitizer_allocator_size_class_map.h b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h index 2bd83b2eb5a4..77ab4fb544a2 100644 --- a/lib/sanitizer_common/sanitizer_allocator_size_class_map.h +++ b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h @@ -161,23 +161,24 @@ class SizeClassMap { return 0; if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog; - uptr l = MostSignificantSetBitIndex(size); - uptr hbits = (size >> (l - S)) & M; - uptr lbits = size & ((1 << (l - S)) - 1); - uptr l1 = l - kMidSizeLog; + const uptr l = MostSignificantSetBitIndex(size); + const uptr hbits = (size >> (l - S)) & M; + const uptr lbits = size & ((1U << (l - S)) - 1); + const uptr l1 = l - kMidSizeLog; return kMidClass + (l1 << S) + hbits + (lbits > 0); } - static uptr MaxCachedHint(uptr class_id) { - // Estimate the result for 
kBatchClassID because this class does not know - // the exact size of TransferBatch. We need to cache fewer batches than user - // chunks, so this number can be small. - if (UNLIKELY(class_id == kBatchClassID)) - return 16; - if (UNLIKELY(class_id == 0)) + static uptr MaxCachedHint(uptr size) { + DCHECK_LE(size, kMaxSize); + if (UNLIKELY(size == 0)) return 0; - uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id); - return Max<uptr>(1, Min(kMaxNumCachedHint, n)); + uptr n; + // Force a 32-bit division if the template parameters allow for it. + if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31) + n = (1UL << kMaxBytesCachedLog) / size; + else + n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size); + return Max<uptr>(1U, Min(kMaxNumCachedHint, n)); } static void Print() { @@ -190,12 +191,12 @@ class SizeClassMap { uptr d = s - prev_s; uptr p = prev_s ? (d * 100 / prev_s) : 0; uptr l = s ? MostSignificantSetBitIndex(s) : 0; - uptr cached = MaxCachedHint(i) * s; + uptr cached = MaxCachedHint(s) * s; if (i == kBatchClassID) d = p = l = 0; Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd " "cached: %zd %zd; id %zd\n", - i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s)); + i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s)); total_cached += cached; prev_s = s; } diff --git a/lib/sanitizer_common/sanitizer_allocator_stats.h b/lib/sanitizer_common/sanitizer_allocator_stats.h index 38b088b8446e..76df3ed8de53 100644 --- a/lib/sanitizer_common/sanitizer_allocator_stats.h +++ b/lib/sanitizer_common/sanitizer_allocator_stats.h @@ -101,7 +101,7 @@ class AllocatorGlobalStats : public AllocatorStats { } private: - mutable SpinMutex mu_; + mutable StaticSpinMutex mu_; }; diff --git a/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/lib/sanitizer_common/sanitizer_atomic_clang_other.h index 35e2d007eda8..2eea549c0fdd 100644 --- a/lib/sanitizer_common/sanitizer_atomic_clang_other.h +++ b/lib/sanitizer_common/sanitizer_atomic_clang_other.h @@ -86,7 +86,7 @@ 
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { typename T::Type cur; for (;;) { cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v); - if (cmp == v) + if (cur == cmp || cur == v) break; cmp = cur; } diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc index ae26d5efc24f..7d72b0cfe4ce 100644 --- a/lib/sanitizer_common/sanitizer_common.cc +++ b/lib/sanitizer_common/sanitizer_common.cc @@ -14,11 +14,10 @@ #include "sanitizer_common.h" #include "sanitizer_allocator_interface.h" #include "sanitizer_allocator_internal.h" +#include "sanitizer_atomic.h" #include "sanitizer_flags.h" #include "sanitizer_libc.h" #include "sanitizer_placement_new.h" -#include "sanitizer_stacktrace_printer.h" -#include "sanitizer_symbolizer.h" namespace __sanitizer { @@ -39,9 +38,10 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type, const char *mmap_type, error_t err, bool raw_report) { static int recursion_count; - if (raw_report || recursion_count) { - // If raw report is requested or we went into recursion, just die. - // The Report() and CHECK calls below may call mmap recursively and fail. + if (SANITIZER_RTEMS || raw_report || recursion_count) { + // If we are on RTEMS or raw report is requested or we went into recursion, + // just die. The Report() and CHECK calls below may call mmap recursively + // and fail. 
RawWrite("ERROR: Failed to mmap\n"); Die(); } @@ -58,19 +58,6 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type, typedef bool UptrComparisonFunction(const uptr &a, const uptr &b); typedef bool U32ComparisonFunction(const u32 &a, const u32 &b); -template<class T> -static inline bool CompareLess(const T &a, const T &b) { - return a < b; -} - -void SortArray(uptr *array, uptr size) { - InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess); -} - -void SortArray(u32 *array, uptr size) { - InternalSort<u32*, U32ComparisonFunction>(&array, size, CompareLess); -} - const char *StripPathPrefix(const char *filepath, const char *strip_path_prefix) { if (!filepath) return nullptr; @@ -107,18 +94,6 @@ void ReportErrorSummary(const char *error_message, const char *alt_tool_name) { __sanitizer_report_error_summary(buff.data()); } -#if !SANITIZER_GO -void ReportErrorSummary(const char *error_type, const AddressInfo &info, - const char *alt_tool_name) { - if (!common_flags()->print_summary) return; - InternalScopedString buff(kMaxSummaryLength); - buff.append("%s ", error_type); - RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style, - common_flags()->strip_path_prefix); - ReportErrorSummary(buff.data(), alt_tool_name); -} -#endif - // Removes the ANSI escape sequences from the input string (in-place). 
void RemoveANSIEscapeSequencesFromString(char *str) { if (!str) @@ -358,6 +333,12 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary, } SANITIZER_INTERFACE_ATTRIBUTE +int __sanitizer_acquire_crash_state() { + static atomic_uint8_t in_crash_state = {}; + return !atomic_exchange(&in_crash_state, 1, memory_order_relaxed); +} + +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_set_death_callback(void (*callback)(void)) { SetUserDieCallback(callback); } diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h index 1fbaee7e39a1..3b999edfbe58 100644 --- a/lib/sanitizer_common/sanitizer_common.h +++ b/lib/sanitizer_common/sanitizer_common.h @@ -39,11 +39,7 @@ struct StackTrace; const uptr kWordSize = SANITIZER_WORDSIZE / 8; const uptr kWordSizeInBits = 8 * kWordSize; -#if defined(__powerpc__) || defined(__powerpc64__) - const uptr kCacheLineSize = 128; -#else - const uptr kCacheLineSize = 64; -#endif +const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE; const uptr kMaxPathLength = 4096; @@ -52,7 +48,7 @@ const uptr kMaxThreadStackSize = 1 << 30; // 1Gb static const uptr kErrorMessageBufferSize = 1 << 16; // Denotes fake PC values that come from JIT/JAVA/etc. -// For such PC values __tsan_symbolize_external() will be called. +// For such PC values __tsan_symbolize_external_ex() will be called. const u64 kExternalPCBit = 1ULL << 60; extern const char *SanitizerToolName; // Can be changed by the tool. @@ -92,8 +88,8 @@ void UnmapOrDie(void *addr, uptr size); // Behaves just like MmapOrDie, but tolerates out of memory condition, in that // case returns nullptr. 
void *MmapOrDieOnFatalError(uptr size, const char *mem_type); -void *MmapFixedNoReserve(uptr fixed_addr, uptr size, - const char *name = nullptr); +bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr) + WARN_UNUSED_RESULT; void *MmapNoReserveOrDie(uptr size, const char *mem_type); void *MmapFixedOrDie(uptr fixed_addr, uptr size); // Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in @@ -110,9 +106,11 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, bool MprotectNoAccess(uptr addr, uptr size); bool MprotectReadOnly(uptr addr, uptr size); +void MprotectMallocZones(void *addr, int prot); + // Find an available address space. uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, - uptr *largest_gap_found); + uptr *largest_gap_found, uptr *max_occupied_addr); // Used to check if we can map shadow memory to a fixed location. bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); @@ -122,8 +120,8 @@ void ReleaseMemoryPagesToOS(uptr beg, uptr end); void IncreaseTotalMmap(uptr size); void DecreaseTotalMmap(uptr size); uptr GetRSS(); -void NoHugePagesInRegion(uptr addr, uptr length); -void DontDumpShadowMemory(uptr addr, uptr length); +bool NoHugePagesInRegion(uptr addr, uptr length); +bool DontDumpShadowMemory(uptr addr, uptr length); // Check if the built VMA size matches the runtime one. void CheckVMASize(); void RunMallocHooks(const void *ptr, uptr size); @@ -153,49 +151,6 @@ typedef void (*fill_profile_f)(uptr start, uptr rss, bool file, // |stats_size| elements. void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size); -// InternalScopedBuffer can be used instead of large stack arrays to -// keep frame size low. -// FIXME: use InternalAlloc instead of MmapOrDie once -// InternalAlloc is made libc-free. 
-template <typename T> -class InternalScopedBuffer { - public: - explicit InternalScopedBuffer(uptr cnt) { - cnt_ = cnt; - ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer"); - } - ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); } - T &operator[](uptr i) { return ptr_[i]; } - T *data() { return ptr_; } - uptr size() { return cnt_ * sizeof(T); } - - private: - T *ptr_; - uptr cnt_; - // Disallow copies and moves. - InternalScopedBuffer(const InternalScopedBuffer &) = delete; - InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete; - InternalScopedBuffer(InternalScopedBuffer &&) = delete; - InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete; -}; - -class InternalScopedString : public InternalScopedBuffer<char> { - public: - explicit InternalScopedString(uptr max_length) - : InternalScopedBuffer<char>(max_length), length_(0) { - (*this)[0] = '\0'; - } - uptr length() { return length_; } - void clear() { - (*this)[0] = '\0'; - length_ = 0; - } - void append(const char *format, ...); - - private: - uptr length_; -}; - // Simple low-level (mmap-based) allocator for internal use. Doesn't have // constructor, so all instances of LowLevelAllocator should be // linker initialized. @@ -243,15 +198,6 @@ class ScopedErrorReportLock { extern uptr stoptheworld_tracer_pid; extern uptr stoptheworld_tracer_ppid; -// Opens the file 'file_name" and reads up to 'max_len' bytes. -// The resulting buffer is mmaped and stored in '*buff'. -// The size of the mmaped region is stored in '*buff_size'. -// The total number of read bytes is stored in '*read_len'. -// Returns true if file was successfully opened and read. -bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, - uptr *read_len, uptr max_len = 1 << 26, - error_t *errno_p = nullptr); - bool IsAccessibleMemoryRange(uptr beg, uptr size); // Error report formatting. 
@@ -275,6 +221,7 @@ bool SetEnv(const char *name, const char *value); u32 GetUid(); void ReExec(); +void CheckASLR(); char **GetArgv(); void PrintCmdline(); bool StackSizeIsUnlimited(); @@ -283,7 +230,7 @@ void SetStackSizeLimitInBytes(uptr limit); bool AddressSpaceIsUnlimited(); void SetAddressSpaceUnlimited(); void AdjustStackSize(void *attr); -void PrepareForSandboxing(__sanitizer_sandbox_arguments *args); +void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args); void SetSandboxingCallback(void (*f)()); void InitializeCoverage(bool enabled, const char *coverage_dir); @@ -297,8 +244,6 @@ void SleepForMillis(int millis); u64 NanoTime(); u64 MonotonicNanoTime(); int Atexit(void (*function)(void)); -void SortArray(uptr *array, uptr size); -void SortArray(u32 *array, uptr size); bool TemplateMatch(const char *templ, const char *str); // Exit @@ -310,13 +255,6 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type, const char *mmap_type, error_t err, bool raw_report = false); -// Set the name of the current thread to 'name', return true on succees. -// The name may be truncated to a system-dependent limit. -bool SanitizerSetThreadName(const char *name); -// Get the name of the current thread (no more than max_len bytes), -// return true on succees. name should have space for at least max_len+1 bytes. -bool SanitizerGetThreadName(char *name, int max_len); - // Specific tools may override behavior of "Die" and "CheckFailed" functions // to do tool-specific job. 
typedef void (*DieCallbackType)(void); @@ -382,6 +320,8 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info, void ReportErrorSummary(const char *error_type, const StackTrace *trace, const char *alt_tool_name = nullptr); +void ReportMmapWriteExec(int prot); + // Math #if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__) extern "C" { @@ -489,13 +429,12 @@ template<typename T> class InternalMmapVectorNoCtor { public: void Initialize(uptr initial_capacity) { - capacity_ = Max(initial_capacity, (uptr)1); + capacity_bytes_ = 0; size_ = 0; - data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor"); - } - void Destroy() { - UnmapOrDie(data_, capacity_ * sizeof(T)); + data_ = 0; + reserve(initial_capacity); } + void Destroy() { UnmapOrDie(data_, capacity_bytes_); } T &operator[](uptr i) { CHECK_LT(i, size_); return data_[i]; @@ -505,10 +444,10 @@ class InternalMmapVectorNoCtor { return data_[i]; } void push_back(const T &element) { - CHECK_LE(size_, capacity_); - if (size_ == capacity_) { + CHECK_LE(size_, capacity()); + if (size_ == capacity()) { uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1); - Resize(new_capacity); + Realloc(new_capacity); } internal_memcpy(&data_[size_++], &element, sizeof(T)); } @@ -529,12 +468,15 @@ class InternalMmapVectorNoCtor { T *data() { return data_; } - uptr capacity() const { - return capacity_; + uptr capacity() const { return capacity_bytes_ / sizeof(T); } + void reserve(uptr new_size) { + // Never downsize internal buffer. 
+ if (new_size > capacity()) + Realloc(new_size); } void resize(uptr new_size) { - Resize(new_size); if (new_size > size_) { + reserve(new_size); internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_)); } size_ = new_size; @@ -556,39 +498,84 @@ class InternalMmapVectorNoCtor { return data() + size(); } + void swap(InternalMmapVectorNoCtor &other) { + Swap(data_, other.data_); + Swap(capacity_bytes_, other.capacity_bytes_); + Swap(size_, other.size_); + } + private: - void Resize(uptr new_capacity) { + void Realloc(uptr new_capacity) { CHECK_GT(new_capacity, 0); CHECK_LE(size_, new_capacity); - T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T), - "InternalMmapVector"); + uptr new_capacity_bytes = + RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached()); + T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector"); internal_memcpy(new_data, data_, size_ * sizeof(T)); - T *old_data = data_; + UnmapOrDie(data_, capacity_bytes_); data_ = new_data; - UnmapOrDie(old_data, capacity_ * sizeof(T)); - capacity_ = new_capacity; + capacity_bytes_ = new_capacity_bytes; } T *data_; - uptr capacity_; + uptr capacity_bytes_; uptr size_; }; +template <typename T> +bool operator==(const InternalMmapVectorNoCtor<T> &lhs, + const InternalMmapVectorNoCtor<T> &rhs) { + if (lhs.size() != rhs.size()) return false; + return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0; +} + +template <typename T> +bool operator!=(const InternalMmapVectorNoCtor<T> &lhs, + const InternalMmapVectorNoCtor<T> &rhs) { + return !(lhs == rhs); +} + template<typename T> class InternalMmapVector : public InternalMmapVectorNoCtor<T> { public: - explicit InternalMmapVector(uptr initial_capacity) { - InternalMmapVectorNoCtor<T>::Initialize(initial_capacity); + InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); } + explicit InternalMmapVector(uptr cnt) { + InternalMmapVectorNoCtor<T>::Initialize(cnt); + this->resize(cnt); } ~InternalMmapVector() { 
InternalMmapVectorNoCtor<T>::Destroy(); } - // Disallow evil constructors. - InternalMmapVector(const InternalMmapVector&); - void operator=(const InternalMmapVector&); + // Disallow copies and moves. + InternalMmapVector(const InternalMmapVector &) = delete; + InternalMmapVector &operator=(const InternalMmapVector &) = delete; + InternalMmapVector(InternalMmapVector &&) = delete; + InternalMmapVector &operator=(InternalMmapVector &&) = delete; +}; + +class InternalScopedString : public InternalMmapVector<char> { + public: + explicit InternalScopedString(uptr max_length) + : InternalMmapVector<char>(max_length), length_(0) { + (*this)[0] = '\0'; + } + uptr length() { return length_; } + void clear() { + (*this)[0] = '\0'; + length_ = 0; + } + void append(const char *format, ...); + + private: + uptr length_; +}; + +template <class T> +struct CompareLess { + bool operator()(const T &a, const T &b) const { return a < b; } }; // HeapSort for arrays and InternalMmapVector. -template<class Container, class Compare> -void InternalSort(Container *v, uptr size, Compare comp) { +template <class T, class Compare = CompareLess<T>> +void Sort(T *v, uptr size, Compare comp = {}) { if (size < 2) return; // Stage 1: insert elements to the heap. @@ -596,8 +583,8 @@ void InternalSort(Container *v, uptr size, Compare comp) { uptr j, p; for (j = i; j > 0; j = p) { p = (j - 1) / 2; - if (comp((*v)[p], (*v)[j])) - Swap((*v)[j], (*v)[p]); + if (comp(v[p], v[j])) + Swap(v[j], v[p]); else break; } @@ -605,18 +592,18 @@ void InternalSort(Container *v, uptr size, Compare comp) { // Stage 2: swap largest element with the last one, // and sink the new top. 
for (uptr i = size - 1; i > 0; i--) { - Swap((*v)[0], (*v)[i]); + Swap(v[0], v[i]); uptr j, max_ind; for (j = 0; j < i; j = max_ind) { uptr left = 2 * j + 1; uptr right = 2 * j + 2; max_ind = j; - if (left < i && comp((*v)[max_ind], (*v)[left])) + if (left < i && comp(v[max_ind], v[left])) max_ind = left; - if (right < i && comp((*v)[max_ind], (*v)[right])) + if (right < i && comp(v[max_ind], v[right])) max_ind = right; if (max_ind != j) - Swap((*v)[j], (*v)[max_ind]); + Swap(v[j], v[max_ind]); else break; } @@ -650,6 +637,25 @@ enum ModuleArch { kModuleArchARM64 }; +// Opens the file 'file_name" and reads up to 'max_len' bytes. +// The resulting buffer is mmaped and stored in '*buff'. +// Returns true if file was successfully opened and read. +bool ReadFileToVector(const char *file_name, + InternalMmapVectorNoCtor<char> *buff, + uptr max_len = 1 << 26, error_t *errno_p = nullptr); + +// Opens the file 'file_name" and reads up to 'max_len' bytes. +// This function is less I/O efficient than ReadFileToVector as it may reread +// file multiple times to avoid mmap during read attempts. It's used to read +// procmap, so short reads with mmap in between can produce inconsistent result. +// The resulting buffer is mmaped and stored in '*buff'. +// The size of the mmaped region is stored in '*buff_size'. +// The total number of read bytes is stored in '*read_len'. +// Returns true if file was successfully opened and read. +bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, + uptr *read_len, uptr max_len = 1 << 26, + error_t *errno_p = nullptr); + // When adding a new architecture, don't forget to also update // script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc. 
inline const char *ModuleArchToString(ModuleArch arch) { diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc index 32342682a595..ff18bc801749 100644 --- a/lib/sanitizer_common/sanitizer_common_interceptors.inc +++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc @@ -34,6 +34,7 @@ // COMMON_INTERCEPTOR_MEMSET_IMPL // COMMON_INTERCEPTOR_MEMMOVE_IMPL // COMMON_INTERCEPTOR_MEMCPY_IMPL +// COMMON_INTERCEPTOR_MMAP_IMPL // COMMON_INTERCEPTOR_COPY_STRING // COMMON_INTERCEPTOR_STRNDUP_IMPL //===----------------------------------------------------------------------===// @@ -75,6 +76,7 @@ #define clock_settime __clock_settime50 #define ctime __ctime50 #define ctime_r __ctime_r50 +#define devname __devname50 #define getitimer __getitimer50 #define getpwent __getpwent50 #define getpwnam __getpwnam50 @@ -88,8 +90,10 @@ #define glob __glob30 #define gmtime __gmtime50 #define gmtime_r __gmtime_r50 +#define localtime __locatime50 #define localtime_r __localtime_r50 #define mktime __mktime50 +#define lstat __lstat50 #define opendir __opendir30 #define readdir __readdir30 #define readdir_r __readdir_r30 @@ -107,6 +111,9 @@ #define times __times13 #define wait3 __wait350 #define wait4 __wait450 +extern const unsigned short *_ctype_tab_; +extern const short *_toupper_tab_; +extern const short *_tolower_tab_; #endif // Platform-specific options. 
@@ -261,6 +268,12 @@ bool PlatformHasDifferentMemcpyAndMemmove(); } #endif +#ifndef COMMON_INTERCEPTOR_MMAP_IMPL +#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \ + off) \ + { return REAL(mmap)(addr, sz, prot, flags, fd, off); } +#endif + #ifndef COMMON_INTERCEPTOR_COPY_STRING #define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {} #endif @@ -1176,6 +1189,50 @@ INTERCEPTOR(SSIZE_T, pwritev64, int fd, __sanitizer_iovec *iov, int iovcnt, #define INIT_PWRITEV64 #endif +#if SANITIZER_INTERCEPT_FGETS +INTERCEPTOR(char *, fgets, char *s, SIZE_T size, void *file) { + // libc file streams can call user-supplied functions, see fopencookie. + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, fgets, s, size, file); + // FIXME: under ASan the call below may write to freed memory and corrupt + // its metadata. See + // https://github.com/google/sanitizers/issues/321. + char *res = REAL(fgets)(s, size, file); + if (res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1); + return res; +} +#define INIT_FGETS COMMON_INTERCEPT_FUNCTION(fgets) +#else +#define INIT_FGETS +#endif + +#if SANITIZER_INTERCEPT_FPUTS +INTERCEPTOR_WITH_SUFFIX(int, fputs, char *s, void *file) { + // libc file streams can call user-supplied functions, see fopencookie. + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, fputs, s, file); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1); + return REAL(fputs)(s, file); +} +#define INIT_FPUTS COMMON_INTERCEPT_FUNCTION(fputs) +#else +#define INIT_FPUTS +#endif + +#if SANITIZER_INTERCEPT_PUTS +INTERCEPTOR(int, puts, char *s) { + // libc file streams can call user-supplied functions, see fopencookie. 
+ void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, puts, s); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1); + return REAL(puts)(s); +} +#define INIT_PUTS COMMON_INTERCEPT_FUNCTION(puts) +#else +#define INIT_PUTS +#endif + #if SANITIZER_INTERCEPT_PRCTL INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3, // NOLINT @@ -1703,6 +1760,7 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size, #if SANITIZER_INTERCEPT_IOCTL #include "sanitizer_common_interceptors_ioctl.inc" +#include "sanitizer_interceptors_ioctl_netbsd.inc" INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) { // We need a frame pointer, because we call into ioctl_common_[pre|post] which // can trigger a report and we need to be able to unwind through this @@ -2125,8 +2183,19 @@ INTERCEPTOR(int, getitimer, int which, void *curr_value) { INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value); - if (new_value) - COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz); + if (new_value) { + // itimerval can contain padding that may be legitimately uninitialized + const struct __sanitizer_itimerval *nv = + (const struct __sanitizer_itimerval *)new_value; + COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_sec, + sizeof(__sanitizer_time_t)); + COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_usec, + sizeof(__sanitizer_suseconds_t)); + COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_sec, + sizeof(__sanitizer_time_t)); + COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_usec, + sizeof(__sanitizer_suseconds_t)); + } // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://github.com/google/sanitizers/issues/321. 
@@ -2765,6 +2834,30 @@ INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) { #define INIT_ACCEPT4 #endif +#if SANITIZER_INTERCEPT_PACCEPT +INTERCEPTOR(int, paccept, int fd, void *addr, unsigned *addrlen, + __sanitizer_sigset_t *set, int f) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, paccept, fd, addr, addrlen, set, f); + unsigned addrlen0 = 0; + if (addrlen) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen)); + addrlen0 = *addrlen; + } + if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set)); + int fd2 = REAL(paccept)(fd, addr, addrlen, set, f); + if (fd2 >= 0) { + if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2); + if (addr && addrlen) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0)); + } + return fd2; +} +#define INIT_PACCEPT COMMON_INTERCEPT_FUNCTION(paccept); +#else +#define INIT_PACCEPT +#endif + #if SANITIZER_INTERCEPT_MODF INTERCEPTOR(double, modf, double x, double *iptr) { void *ctx; @@ -2810,7 +2903,7 @@ INTERCEPTOR(long double, modfl, long double x, long double *iptr) { #define INIT_MODF #endif -#if SANITIZER_INTERCEPT_RECVMSG +#if SANITIZER_INTERCEPT_RECVMSG || SANITIZER_INTERCEPT_RECVMMSG static void write_msghdr(void *ctx, struct __sanitizer_msghdr *msg, SSIZE_T maxlen) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg, sizeof(*msg)); @@ -2823,7 +2916,9 @@ static void write_msghdr(void *ctx, struct __sanitizer_msghdr *msg, if (msg->msg_control && msg->msg_controllen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_control, msg->msg_controllen); } +#endif +#if SANITIZER_INTERCEPT_RECVMSG INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg, int flags) { void *ctx; @@ -2846,7 +2941,30 @@ INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg, #define INIT_RECVMSG #endif -#if SANITIZER_INTERCEPT_SENDMSG +#if SANITIZER_INTERCEPT_RECVMMSG +INTERCEPTOR(int, recvmmsg, int fd, struct __sanitizer_mmsghdr *msgvec, + unsigned int vlen, int flags, void *timeout) { + 
void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, recvmmsg, fd, msgvec, vlen, flags, timeout); + if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz); + int res = REAL(recvmmsg)(fd, msgvec, vlen, flags, timeout); + if (res >= 0) { + if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd); + for (int i = 0; i < res; ++i) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len, + sizeof(msgvec[i].msg_len)); + write_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len); + COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, &msgvec[i].msg_hdr); + } + } + return res; +} +#define INIT_RECVMMSG COMMON_INTERCEPT_FUNCTION(recvmmsg); +#else +#define INIT_RECVMMSG +#endif + +#if SANITIZER_INTERCEPT_SENDMSG || SANITIZER_INTERCEPT_SENDMMSG static void read_msghdr_control(void *ctx, void *control, uptr controllen) { const unsigned kCmsgDataOffset = RoundUpTo(sizeof(__sanitizer_cmsghdr), sizeof(uptr)); @@ -2896,7 +3014,9 @@ static void read_msghdr(void *ctx, struct __sanitizer_msghdr *msg, if (msg->msg_control && msg->msg_controllen) read_msghdr_control(ctx, msg->msg_control, msg->msg_controllen); } +#endif +#if SANITIZER_INTERCEPT_SENDMSG INTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg, int flags) { void *ctx; @@ -2915,6 +3035,30 @@ INTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg, #define INIT_SENDMSG #endif +#if SANITIZER_INTERCEPT_SENDMMSG +INTERCEPTOR(int, sendmmsg, int fd, struct __sanitizer_mmsghdr *msgvec, + unsigned vlen, int flags) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sendmmsg, fd, msgvec, vlen, flags); + if (fd >= 0) { + COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd); + COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd); + } + int res = REAL(sendmmsg)(fd, msgvec, vlen, flags); + if (res >= 0 && msgvec) + for (int i = 0; i < res; ++i) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len, + sizeof(msgvec[i].msg_len)); + if (common_flags()->intercept_send) + read_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len); + } + return res; +} 
+#define INIT_SENDMMSG COMMON_INTERCEPT_FUNCTION(sendmmsg); +#else +#define INIT_SENDMMSG +#endif + #if SANITIZER_INTERCEPT_GETPEERNAME INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) { void *ctx; @@ -3096,13 +3240,25 @@ INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) { #endif #if SANITIZER_INTERCEPT_SETLOCALE +static void unpoison_ctype_arrays(void *ctx) { +#if SANITIZER_NETBSD + // These arrays contain 256 regular elements in unsigned char range + 1 EOF + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _ctype_tab_, 257 * sizeof(short)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _toupper_tab_, 257 * sizeof(short)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _tolower_tab_, 257 * sizeof(short)); +#endif +} + INTERCEPTOR(char *, setlocale, int category, char *locale) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, locale); if (locale) COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1); char *res = REAL(setlocale)(category, locale); - if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); + if (res) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1); + unpoison_ctype_arrays(ctx); + } return res; } @@ -3485,7 +3641,8 @@ INTERCEPTOR(char *, strerror, int errnum) { // * GNU version returns message pointer, which points to either buf or some // static storage. #if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \ - SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD + SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD || \ + SANITIZER_FREEBSD || SANITIZER_OPENBSD // POSIX version. Spec is not clear on whether buf is NULL-terminated. // At least on OSX, buf contents are valid even when the call fails. 
INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) { @@ -3908,7 +4065,7 @@ INTERCEPTOR(void, _exit, int status) { #define INIT__EXIT #endif -#if SANITIZER_INTERCEPT_PHTREAD_MUTEX +#if SANITIZER_INTERCEPT_PTHREAD_MUTEX INTERCEPTOR(int, pthread_mutex_lock, void *m) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m); @@ -3941,13 +4098,42 @@ INTERCEPTOR(int, pthread_mutex_unlock, void *m) { #define INIT_PTHREAD_MUTEX_UNLOCK #endif -#if SANITIZER_NETBSD -INTERCEPTOR(int, __libc_mutex_lock, void *m) \ - ALIAS(WRAPPER_NAME(pthread_mutex_lock)); -INTERCEPTOR(int, __libc_mutex_unlock, void *m) \ - ALIAS(WRAPPER_NAME(pthread_mutex_unlock)); -INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate) \ - ALIAS(WRAPPER_NAME(pthread_setcancelstate)); +#if SANITIZER_INTERCEPT___PTHREAD_MUTEX +INTERCEPTOR(int, __pthread_mutex_lock, void *m) { + return WRAP(pthread_mutex_lock)(m); +} + +INTERCEPTOR(int, __pthread_mutex_unlock, void *m) { + return WRAP(pthread_mutex_unlock)(m); +} + +#define INIT___PTHREAD_MUTEX_LOCK \ + COMMON_INTERCEPT_FUNCTION(__pthread_mutex_lock) +#define INIT___PTHREAD_MUTEX_UNLOCK \ + COMMON_INTERCEPT_FUNCTION(__pthread_mutex_unlock) +#else +#define INIT___PTHREAD_MUTEX_LOCK +#define INIT___PTHREAD_MUTEX_UNLOCK +#endif + +#if SANITIZER_INTERCEPT___LIBC_MUTEX +INTERCEPTOR(int, __libc_mutex_lock, void *m) +ALIAS(WRAPPER_NAME(pthread_mutex_lock)); + +INTERCEPTOR(int, __libc_mutex_unlock, void *m) +ALIAS(WRAPPER_NAME(pthread_mutex_unlock)); + +INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate) +ALIAS(WRAPPER_NAME(pthread_setcancelstate)); + +#define INIT___LIBC_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock) +#define INIT___LIBC_MUTEX_UNLOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock) +#define INIT___LIBC_THR_SETCANCELSTATE \ + COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate) +#else +#define INIT___LIBC_MUTEX_LOCK +#define INIT___LIBC_MUTEX_UNLOCK +#define INIT___LIBC_THR_SETCANCELSTATE 
#endif #if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R @@ -4302,8 +4488,6 @@ INTERCEPTOR(int, random_r, void *buf, u32 *result) { #if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET INTERCEPTOR_PTHREAD_ATTR_GET(detachstate, sizeof(int)) INTERCEPTOR_PTHREAD_ATTR_GET(guardsize, sizeof(SIZE_T)) -INTERCEPTOR_PTHREAD_ATTR_GET(schedparam, struct_sched_param_sz) -INTERCEPTOR_PTHREAD_ATTR_GET(schedpolicy, sizeof(int)) INTERCEPTOR_PTHREAD_ATTR_GET(scope, sizeof(int)) INTERCEPTOR_PTHREAD_ATTR_GET(stacksize, sizeof(SIZE_T)) INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) { @@ -4334,8 +4518,6 @@ int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) { #define INIT_PTHREAD_ATTR_GET \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getguardsize); \ - COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedparam); \ - COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedpolicy); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getscope); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstacksize); \ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstack); @@ -4343,6 +4525,17 @@ int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) { #define INIT_PTHREAD_ATTR_GET #endif +#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED +INTERCEPTOR_PTHREAD_ATTR_GET(schedparam, struct_sched_param_sz) +INTERCEPTOR_PTHREAD_ATTR_GET(schedpolicy, sizeof(int)) + +#define INIT_PTHREAD_ATTR_GET_SCHED \ + COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedparam); \ + COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedpolicy); +#else +#define INIT_PTHREAD_ATTR_GET_SCHED +#endif + #if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED INTERCEPTOR_PTHREAD_ATTR_GET(inheritsched, sizeof(int)) @@ -6189,6 +6382,22 @@ INTERCEPTOR(int, stat, const char *path, void *buf) { #define INIT_STAT #endif +#if SANITIZER_INTERCEPT_LSTAT +INTERCEPTOR(int, lstat, const char *path, void *buf) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, lstat, path, 
buf); + if (common_flags()->intercept_stat) + COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0); + int res = REAL(lstat)(path, buf); + if (!res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz); + return res; +} +#define INIT_LSTAT COMMON_INTERCEPT_FUNCTION(lstat) +#else +#define INIT_LSTAT +#endif + #if SANITIZER_INTERCEPT___XSTAT INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { void *ctx; @@ -6401,10 +6610,636 @@ INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) { #define INIT_WCSCAT #endif +#if SANITIZER_INTERCEPT_STRXFRM +static SIZE_T RealStrLen(const char *str) { return REAL(strlen)(str); } + +static SIZE_T RealStrLen(const wchar_t *str) { return REAL(wcslen)(str); } + +#define STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len, ...) \ + { \ + void *ctx; \ + COMMON_INTERCEPTOR_ENTER(ctx, strxfrm, dest, src, len, ##__VA_ARGS__); \ + COMMON_INTERCEPTOR_READ_RANGE(ctx, src, \ + sizeof(*src) * (RealStrLen(src) + 1)); \ + SIZE_T res = REAL(strxfrm)(dest, src, len, ##__VA_ARGS__); \ + if (res < len) \ + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, sizeof(*src) * (res + 1)); \ + return res; \ + } + +INTERCEPTOR(SIZE_T, strxfrm, char *dest, const char *src, SIZE_T len) { + STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len); +} + +INTERCEPTOR(SIZE_T, strxfrm_l, char *dest, const char *src, SIZE_T len, + void *locale) { + STRXFRM_INTERCEPTOR_IMPL(strxfrm_l, dest, src, len, locale); +} + +#define INIT_STRXFRM \ + COMMON_INTERCEPT_FUNCTION(strxfrm); \ + COMMON_INTERCEPT_FUNCTION(strxfrm_l); +#else +#define INIT_STRXFRM +#endif + +#if SANITIZER_INTERCEPT___STRXFRM_L +INTERCEPTOR(SIZE_T, __strxfrm_l, char *dest, const char *src, SIZE_T len, + void *locale) { + STRXFRM_INTERCEPTOR_IMPL(__strxfrm_l, dest, src, len, locale); +} + +#define INIT___STRXFRM_L COMMON_INTERCEPT_FUNCTION(__strxfrm_l); +#else +#define INIT___STRXFRM_L +#endif + +#if SANITIZER_INTERCEPT_WCSXFRM +INTERCEPTOR(SIZE_T, wcsxfrm, wchar_t *dest, 
const wchar_t *src, SIZE_T len) { + STRXFRM_INTERCEPTOR_IMPL(wcsxfrm, dest, src, len); +} + +INTERCEPTOR(SIZE_T, wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len, + void *locale) { + STRXFRM_INTERCEPTOR_IMPL(wcsxfrm_l, dest, src, len, locale); +} + +#define INIT_WCSXFRM \ + COMMON_INTERCEPT_FUNCTION(wcsxfrm); \ + COMMON_INTERCEPT_FUNCTION(wcsxfrm_l); +#else +#define INIT_WCSXFRM +#endif + +#if SANITIZER_INTERCEPT___WCSXFRM_L +INTERCEPTOR(SIZE_T, __wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len, + void *locale) { + STRXFRM_INTERCEPTOR_IMPL(__wcsxfrm_l, dest, src, len, locale); +} + +#define INIT___WCSXFRM_L COMMON_INTERCEPT_FUNCTION(__wcsxfrm_l); +#else +#define INIT___WCSXFRM_L +#endif + +#if SANITIZER_INTERCEPT_ACCT +INTERCEPTOR(int, acct, const char *file) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, acct, file); + if (file) + COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1); + return REAL(acct)(file); +} +#define INIT_ACCT COMMON_INTERCEPT_FUNCTION(acct) +#else +#define INIT_ACCT +#endif + +#if SANITIZER_INTERCEPT_USER_FROM_UID +INTERCEPTOR(const char *, user_from_uid, u32 uid, int nouser) { + void *ctx; + const char *user; + COMMON_INTERCEPTOR_ENTER(ctx, user_from_uid, uid, nouser); + user = REAL(user_from_uid)(uid, nouser); + if (user) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, REAL(strlen)(user) + 1); + return user; +} +#define INIT_USER_FROM_UID COMMON_INTERCEPT_FUNCTION(user_from_uid) +#else +#define INIT_USER_FROM_UID +#endif + +#if SANITIZER_INTERCEPT_UID_FROM_USER +INTERCEPTOR(int, uid_from_user, const char *name, u32 *uid) { + void *ctx; + int res; + COMMON_INTERCEPTOR_ENTER(ctx, uid_from_user, name, uid); + if (name) + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + res = REAL(uid_from_user)(name, uid); + if (uid) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, uid, sizeof(*uid)); + return res; +} +#define INIT_UID_FROM_USER COMMON_INTERCEPT_FUNCTION(uid_from_user) +#else +#define INIT_UID_FROM_USER 
+#endif + +#if SANITIZER_INTERCEPT_GROUP_FROM_GID +INTERCEPTOR(const char *, group_from_gid, u32 gid, int nogroup) { + void *ctx; + const char *group; + COMMON_INTERCEPTOR_ENTER(ctx, group_from_gid, gid, nogroup); + group = REAL(group_from_gid)(gid, nogroup); + if (group) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, REAL(strlen)(group) + 1); + return group; +} +#define INIT_GROUP_FROM_GID COMMON_INTERCEPT_FUNCTION(group_from_gid) +#else +#define INIT_GROUP_FROM_GID +#endif + +#if SANITIZER_INTERCEPT_GID_FROM_GROUP +INTERCEPTOR(int, gid_from_group, const char *group, u32 *gid) { + void *ctx; + int res; + COMMON_INTERCEPTOR_ENTER(ctx, gid_from_group, group, gid); + if (group) + COMMON_INTERCEPTOR_READ_RANGE(ctx, group, REAL(strlen)(group) + 1); + res = REAL(gid_from_group)(group, gid); + if (gid) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, gid, sizeof(*gid)); + return res; +} +#define INIT_GID_FROM_GROUP COMMON_INTERCEPT_FUNCTION(gid_from_group) +#else +#define INIT_GID_FROM_GROUP +#endif + +#if SANITIZER_INTERCEPT_ACCESS +INTERCEPTOR(int, access, const char *path, int mode) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, access, path, mode); + if (path) + COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + return REAL(access)(path, mode); +} +#define INIT_ACCESS COMMON_INTERCEPT_FUNCTION(access) +#else +#define INIT_ACCESS +#endif + +#if SANITIZER_INTERCEPT_FACCESSAT +INTERCEPTOR(int, faccessat, int fd, const char *path, int mode, int flags) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, faccessat, fd, path, mode, flags); + if (path) + COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + return REAL(faccessat)(fd, path, mode, flags); +} +#define INIT_FACCESSAT COMMON_INTERCEPT_FUNCTION(faccessat) +#else +#define INIT_FACCESSAT +#endif + +#if SANITIZER_INTERCEPT_GETGROUPLIST +INTERCEPTOR(int, getgrouplist, const char *name, u32 basegid, u32 *groups, + int *ngroups) { + void *ctx; + int res; + COMMON_INTERCEPTOR_ENTER(ctx, getgrouplist, name, 
basegid, groups, ngroups); + if (name) + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + if (ngroups) + COMMON_INTERCEPTOR_READ_RANGE(ctx, ngroups, sizeof(*ngroups)); + res = REAL(getgrouplist)(name, basegid, groups, ngroups); + if (!res && groups && ngroups) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups)); + } + return res; +} + +#define INIT_GETGROUPLIST COMMON_INTERCEPT_FUNCTION(getgrouplist); +#else +#define INIT_GETGROUPLIST +#endif + +#if SANITIZER_INTERCEPT_GETGROUPMEMBERSHIP +INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups, + int maxgrp, int *ngroups) { + void *ctx; + int res; + COMMON_INTERCEPTOR_ENTER(ctx, getgroupmembership, name, basegid, groups, + maxgrp, ngroups); + if (name) + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + res = REAL(getgroupmembership)(name, basegid, groups, maxgrp, ngroups); + if (!res && groups && ngroups) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups)); + } + return res; +} + +#define INIT_GETGROUPMEMBERSHIP COMMON_INTERCEPT_FUNCTION(getgroupmembership); +#else +#define INIT_GETGROUPMEMBERSHIP +#endif + +#if SANITIZER_INTERCEPT_READLINK +INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) { + void* ctx; + COMMON_INTERCEPTOR_ENTER(ctx, readlink, path, buf, bufsiz); + COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + SSIZE_T res = REAL(readlink)(path, buf, bufsiz); + if (res > 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res); + return res; +} + +#define INIT_READLINK COMMON_INTERCEPT_FUNCTION(readlink) +#else +#define INIT_READLINK +#endif + +#if SANITIZER_INTERCEPT_READLINKAT +INTERCEPTOR(SSIZE_T, readlinkat, int dirfd, const char *path, char *buf, + SIZE_T bufsiz) { + void* ctx; + COMMON_INTERCEPTOR_ENTER(ctx, readlinkat, 
dirfd, path, buf, bufsiz); + COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + SSIZE_T res = REAL(readlinkat)(dirfd, path, buf, bufsiz); + if (res > 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res); + return res; +} + +#define INIT_READLINKAT COMMON_INTERCEPT_FUNCTION(readlinkat) +#else +#define INIT_READLINKAT +#endif + +#if SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT +INTERCEPTOR(int, name_to_handle_at, int dirfd, const char *pathname, + struct file_handle *handle, int *mount_id, int flags) { + void* ctx; + COMMON_INTERCEPTOR_ENTER(ctx, name_to_handle_at, dirfd, pathname, handle, + mount_id, flags); + COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, REAL(strlen)(pathname) + 1); + + __sanitizer_file_handle *sanitizer_handle = + reinterpret_cast<__sanitizer_file_handle*>(handle); + COMMON_INTERCEPTOR_READ_RANGE( + ctx, &sanitizer_handle->handle_bytes, + sizeof(sanitizer_handle->handle_bytes)); + + int res = REAL(name_to_handle_at)(dirfd, pathname, handle, mount_id, flags); + if (!res) { + COMMON_INTERCEPTOR_WRITE_RANGE( + ctx, &sanitizer_handle->handle_bytes, + sizeof(sanitizer_handle->handle_bytes)); + COMMON_INTERCEPTOR_WRITE_RANGE( + ctx, &sanitizer_handle->handle_type, + sizeof(sanitizer_handle->handle_type)); + COMMON_INTERCEPTOR_WRITE_RANGE( + ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mount_id, sizeof(*mount_id)); + } + return res; +} + +#define INIT_NAME_TO_HANDLE_AT COMMON_INTERCEPT_FUNCTION(name_to_handle_at) +#else +#define INIT_NAME_TO_HANDLE_AT +#endif + +#if SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT +INTERCEPTOR(int, open_by_handle_at, int mount_fd, struct file_handle* handle, + int flags) { + void* ctx; + COMMON_INTERCEPTOR_ENTER(ctx, open_by_handle_at, mount_fd, handle, flags); + + __sanitizer_file_handle *sanitizer_handle = + reinterpret_cast<__sanitizer_file_handle*>(handle); + COMMON_INTERCEPTOR_READ_RANGE( + ctx, &sanitizer_handle->handle_bytes, + 
sizeof(sanitizer_handle->handle_bytes)); + COMMON_INTERCEPTOR_READ_RANGE( + ctx, &sanitizer_handle->handle_type, + sizeof(sanitizer_handle->handle_type)); + COMMON_INTERCEPTOR_READ_RANGE( + ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes); + + return REAL(open_by_handle_at)(mount_fd, handle, flags); +} + +#define INIT_OPEN_BY_HANDLE_AT COMMON_INTERCEPT_FUNCTION(open_by_handle_at) +#else +#define INIT_OPEN_BY_HANDLE_AT +#endif + +#if SANITIZER_INTERCEPT_STRLCPY +INTERCEPTOR(SIZE_T, strlcpy, char *dst, char *src, SIZE_T size) { + void *ctx; + SIZE_T res; + COMMON_INTERCEPTOR_ENTER(ctx, strlcpy, dst, src, size); + if (src) { + // Keep strnlen as macro argument, as macro may ignore it. + COMMON_INTERCEPTOR_READ_STRING( + ctx, src, Min(internal_strnlen(src, size), size - 1) + 1); + } + res = REAL(strlcpy)(dst, src, size); + COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, REAL(strlen)(dst) + 1); + return res; +} + +INTERCEPTOR(SIZE_T, strlcat, char *dst, char *src, SIZE_T size) { + void *ctx; + SIZE_T len = 0; + COMMON_INTERCEPTOR_ENTER(ctx, strlcat, dst, src, size); + // src is checked in the strlcpy() interceptor + if (dst) { + len = internal_strnlen(dst, size); + COMMON_INTERCEPTOR_READ_STRING(ctx, dst, Min(len, size - 1) + 1); + } + // Reuse the rest of the code in the strlcpy() interceptor + return WRAP(strlcpy)(dst + len, src, size - len) + len; +} +#define INIT_STRLCPY \ + COMMON_INTERCEPT_FUNCTION(strlcpy); \ + COMMON_INTERCEPT_FUNCTION(strlcat); +#else +#define INIT_STRLCPY +#endif + +#if SANITIZER_INTERCEPT_MMAP +INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd, + OFF_T off) { + void *ctx; + if (common_flags()->detect_write_exec) + ReportMmapWriteExec(prot); + if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) + return (void *)internal_mmap(addr, sz, prot, flags, fd, off); + COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off); + COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off); +} + 
+INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) { + void *ctx; + if (common_flags()->detect_write_exec) + ReportMmapWriteExec(prot); + if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) + return (int)internal_mprotect(addr, sz, prot); + COMMON_INTERCEPTOR_ENTER(ctx, mprotect, addr, sz, prot); + MprotectMallocZones(addr, prot); + return REAL(mprotect)(addr, sz, prot); +} +#define INIT_MMAP \ + COMMON_INTERCEPT_FUNCTION(mmap); \ + COMMON_INTERCEPT_FUNCTION(mprotect); +#else +#define INIT_MMAP +#endif + +#if SANITIZER_INTERCEPT_MMAP64 +INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, int fd, + OFF64_T off) { + void *ctx; + if (common_flags()->detect_write_exec) + ReportMmapWriteExec(prot); + if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) + return (void *)internal_mmap(addr, sz, prot, flags, fd, off); + COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off); + COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap64, addr, sz, prot, flags, fd, off); +} +#define INIT_MMAP64 COMMON_INTERCEPT_FUNCTION(mmap64); +#else +#define INIT_MMAP64 +#endif + +#if SANITIZER_INTERCEPT_DEVNAME +INTERCEPTOR(char *, devname, u64 dev, u32 type) { + void *ctx; + char *name; + COMMON_INTERCEPTOR_ENTER(ctx, devname, dev, type); + name = REAL(devname)(dev, type); + if (name) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1); + return name; +} +#define INIT_DEVNAME COMMON_INTERCEPT_FUNCTION(devname); +#else +#define INIT_DEVNAME +#endif + +#if SANITIZER_INTERCEPT_DEVNAME_R +INTERCEPTOR(int, devname_r, u64 dev, u32 type, char *path, uptr len) { + void *ctx; + int res; + COMMON_INTERCEPTOR_ENTER(ctx, devname_r, dev, type, path, len); + res = REAL(devname_r)(dev, type, path, len); + if (!res) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, REAL(strlen)(path) + 1); + return res; +} +#define INIT_DEVNAME_R COMMON_INTERCEPT_FUNCTION(devname_r); +#else +#define INIT_DEVNAME_R +#endif + +#if SANITIZER_INTERCEPT_FGETLN +INTERCEPTOR(char *, fgetln, 
__sanitizer_FILE *stream, SIZE_T *len) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, fgetln, stream, len); + char *str = REAL(fgetln)(stream, len); + if (str && len) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, *len); + } + return str; +} +#define INIT_FGETLN COMMON_INTERCEPT_FUNCTION(fgetln) +#else +#define INIT_FGETLN +#endif + +#if SANITIZER_INTERCEPT_STRMODE +INTERCEPTOR(void, strmode, u32 mode, char *bp) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strmode, mode, bp); + REAL(strmode)(mode, bp); + if (bp) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, REAL(strlen)(bp) + 1); +} +#define INIT_STRMODE COMMON_INTERCEPT_FUNCTION(strmode) +#else +#define INIT_STRMODE +#endif + +#if SANITIZER_INTERCEPT_TTYENT +INTERCEPTOR(struct __sanitizer_ttyent *, getttyent, void) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getttyent); + struct __sanitizer_ttyent *ttyent = REAL(getttyent)(); + if (ttyent) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz); + return ttyent; +} +INTERCEPTOR(struct __sanitizer_ttyent *, getttynam, char *name) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getttynam, name); + if (name) + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + struct __sanitizer_ttyent *ttyent = REAL(getttynam)(name); + if (ttyent) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz); + return ttyent; +} +INTERCEPTOR(int, setttyentpath, char *path) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, setttyentpath, path); + if (path) + COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + return REAL(setttyentpath)(path); +} +#define INIT_TTYENT \ + COMMON_INTERCEPT_FUNCTION(getttyent); \ + COMMON_INTERCEPT_FUNCTION(getttynam); \ + COMMON_INTERCEPT_FUNCTION(setttyentpath) +#else +#define INIT_TTYENT +#endif + +#if SANITIZER_INTERCEPT_PROTOENT +INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getprotoent); + struct 
__sanitizer_protoent *p = REAL(getprotoent)(); + if (p) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1); + + SIZE_T pp_size = 1; // One handles the trailing \0 + + for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, + pp_size * sizeof(char **)); + } + return p; +} + +INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname, name); + if (name) + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + struct __sanitizer_protoent *p = REAL(getprotobyname)(name); + if (p) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1); + + SIZE_T pp_size = 1; // One handles the trailing \0 + + for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, + pp_size * sizeof(char **)); + } + return p; +} + +INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber, proto); + struct __sanitizer_protoent *p = REAL(getprotobynumber)(proto); + if (p) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1); + + SIZE_T pp_size = 1; // One handles the trailing \0 + + for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, + pp_size * sizeof(char **)); + } + return p; +} +#define INIT_PROTOENT \ + COMMON_INTERCEPT_FUNCTION(getprotoent); \ + COMMON_INTERCEPT_FUNCTION(getprotobyname); \ + COMMON_INTERCEPT_FUNCTION(getprotobynumber) 
+#else +#define INIT_PROTOENT +#endif + +#if SANITIZER_INTERCEPT_NETENT +INTERCEPTOR(struct __sanitizer_netent *, getnetent) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getnetent); + struct __sanitizer_netent *n = REAL(getnetent)(); + if (n) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1); + + SIZE_T nn_size = 1; // One handles the trailing \0 + + for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, + nn_size * sizeof(char **)); + } + return n; +} + +INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getnetbyname, name); + if (name) + COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); + struct __sanitizer_netent *n = REAL(getnetbyname)(name); + if (n) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1); + + SIZE_T nn_size = 1; // One handles the trailing \0 + + for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, + nn_size * sizeof(char **)); + } + return n; +} + +INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, getnetbyaddr, net, type); + struct __sanitizer_netent *n = REAL(getnetbyaddr)(net, type); + if (n) { + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1); + + SIZE_T nn_size = 1; // One handles the trailing \0 + + for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1); + + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, + nn_size * sizeof(char **)); + 
} + return n; +} +#define INIT_NETENT \ + COMMON_INTERCEPT_FUNCTION(getnetent); \ + COMMON_INTERCEPT_FUNCTION(getnetbyname); \ + COMMON_INTERCEPT_FUNCTION(getnetbyaddr) +#else +#define INIT_NETENT +#endif + static void InitializeCommonInterceptors() { static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1]; interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap(); + INIT_MMAP; + INIT_MMAP64; INIT_TEXTDOMAIN; INIT_STRLEN; INIT_STRNLEN; @@ -6422,6 +7257,8 @@ static void InitializeCommonInterceptors() { INIT_STRSPN; INIT_STRTOK; INIT_STRPBRK; + INIT_STRXFRM; + INIT___STRXFRM_L; INIT_MEMSET; INIT_MEMMOVE; INIT_MEMCPY; @@ -6443,6 +7280,9 @@ static void InitializeCommonInterceptors() { INIT_WRITEV; INIT_PWRITEV; INIT_PWRITEV64; + INIT_FGETS; + INIT_FPUTS; + INIT_PUTS; INIT_PRCTL; INIT_LOCALTIME_AND_FRIENDS; INIT_STRPTIME; @@ -6480,9 +7320,12 @@ static void InitializeCommonInterceptors() { INIT_GETSOCKOPT; INIT_ACCEPT; INIT_ACCEPT4; + INIT_PACCEPT; INIT_MODF; INIT_RECVMSG; INIT_SENDMSG; + INIT_RECVMMSG; + INIT_SENDMMSG; INIT_GETPEERNAME; INIT_IOCTL; INIT_INET_ATON; @@ -6524,6 +7367,11 @@ static void InitializeCommonInterceptors() { INIT__EXIT; INIT_PTHREAD_MUTEX_LOCK; INIT_PTHREAD_MUTEX_UNLOCK; + INIT___PTHREAD_MUTEX_LOCK; + INIT___PTHREAD_MUTEX_UNLOCK; + INIT___LIBC_MUTEX_LOCK; + INIT___LIBC_MUTEX_UNLOCK; + INIT___LIBC_THR_SETCANCELSTATE; INIT_GETMNTENT; INIT_GETMNTENT_R; INIT_STATFS; @@ -6537,6 +7385,7 @@ static void InitializeCommonInterceptors() { INIT_SHMCTL; INIT_RANDOM_R; INIT_PTHREAD_ATTR_GET; + INIT_PTHREAD_ATTR_GET_SCHED; INIT_PTHREAD_ATTR_GETINHERITSCHED; INIT_PTHREAD_ATTR_GETAFFINITY_NP; INIT_PTHREAD_MUTEXATTR_GETPSHARED; @@ -6600,6 +7449,7 @@ static void InitializeCommonInterceptors() { INIT_SEND_SENDTO; INIT_STAT; INIT_EVENTFD_READ_WRITE; + INIT_LSTAT; INIT___XSTAT; INIT___XSTAT64; INIT___LXSTAT; @@ -6610,12 +7460,29 @@ static void InitializeCommonInterceptors() { INIT_GETLOADAVG; INIT_WCSLEN; INIT_WCSCAT; - -#if SANITIZER_NETBSD - 
COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock); - COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock); - COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate); -#endif + INIT_WCSXFRM; + INIT___WCSXFRM_L; + INIT_ACCT; + INIT_USER_FROM_UID; + INIT_UID_FROM_USER; + INIT_GROUP_FROM_GID; + INIT_GID_FROM_GROUP; + INIT_ACCESS; + INIT_FACCESSAT; + INIT_GETGROUPLIST; + INIT_GETGROUPMEMBERSHIP; + INIT_READLINK; + INIT_READLINKAT; + INIT_NAME_TO_HANDLE_AT; + INIT_OPEN_BY_HANDLE_AT; + INIT_STRLCPY; + INIT_DEVNAME; + INIT_DEVNAME_R; + INIT_FGETLN; + INIT_STRMODE; + INIT_TTYENT; + INIT_PROTOENT; + INIT_NETENT; INIT___PRINTF_CHK; } diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc index 24e7548a597e..2d633c173894 100755 --- a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc +++ b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc @@ -10,6 +10,8 @@ // Ioctl handling in common sanitizer interceptors. //===----------------------------------------------------------------------===// +#if !SANITIZER_NETBSD + #include "sanitizer_flags.h" struct ioctl_desc { @@ -478,7 +480,7 @@ struct ioctl_desc_compare { static void ioctl_init() { ioctl_table_fill(); - InternalSort(&ioctl_table, ioctl_table_size, ioctl_desc_compare()); + Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare()); bool bad = false; for (unsigned i = 0; i < ioctl_table_size - 1; ++i) { @@ -604,3 +606,5 @@ static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d, COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len); } } + +#endif diff --git a/lib/sanitizer_common/sanitizer_common_interface.inc b/lib/sanitizer_common/sanitizer_common_interface.inc index 2568df3a5bff..377580cb0b4e 100644 --- a/lib/sanitizer_common/sanitizer_common_interface.inc +++ b/lib/sanitizer_common/sanitizer_common_interface.inc @@ -8,6 +8,7 @@ 
//===----------------------------------------------------------------------===// // Sanitizer Common interface list. //===----------------------------------------------------------------------===// +INTERFACE_FUNCTION(__sanitizer_acquire_crash_state) INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container) INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address) INTERFACE_FUNCTION(__sanitizer_set_death_callback) diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc index 5cdfbbb275f7..796a054858bc 100644 --- a/lib/sanitizer_common/sanitizer_common_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc @@ -11,76 +11,14 @@ // run-time libraries. //===----------------------------------------------------------------------===// -#include "sanitizer_common.h" - #include "sanitizer_allocator_interface.h" -#include "sanitizer_file.h" +#include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_procmaps.h" -#include "sanitizer_report_decorator.h" -#include "sanitizer_stackdepot.h" -#include "sanitizer_stacktrace.h" -#include "sanitizer_symbolizer.h" -#if SANITIZER_POSIX -#include "sanitizer_posix.h" -#endif namespace __sanitizer { -#if !SANITIZER_FUCHSIA - -bool ReportFile::SupportsColors() { - SpinMutexLock l(mu); - ReopenIfNecessary(); - return SupportsColoredOutput(fd); -} - -static INLINE bool ReportSupportsColors() { - return report_file.SupportsColors(); -} - -#else // SANITIZER_FUCHSIA - -// Fuchsia's logs always go through post-processing that handles colorization. -static INLINE bool ReportSupportsColors() { return true; } - -#endif // !SANITIZER_FUCHSIA - -bool ColorizeReports() { - // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color - // printing on Windows. 
- if (SANITIZER_WINDOWS) - return false; - - const char *flag = common_flags()->color; - return internal_strcmp(flag, "always") == 0 || - (internal_strcmp(flag, "auto") == 0 && ReportSupportsColors()); -} - -static void (*sandboxing_callback)(); -void SetSandboxingCallback(void (*f)()) { - sandboxing_callback = f; -} - -void ReportErrorSummary(const char *error_type, const StackTrace *stack, - const char *alt_tool_name) { -#if !SANITIZER_GO - if (!common_flags()->print_summary) - return; - if (stack->size == 0) { - ReportErrorSummary(error_type); - return; - } - // Currently, we include the first stack frame into the report summary. - // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc). - uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]); - SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc); - ReportErrorSummary(error_type, frame->info, alt_tool_name); - frame->ClearAll(); -#endif -} - static void (*SoftRssLimitExceededCallback)(bool exceeded); void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) { CHECK_EQ(SoftRssLimitExceededCallback, nullptr); @@ -88,17 +26,22 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) { } #if SANITIZER_LINUX && !SANITIZER_GO +// Weak default implementation for when sanitizer_stackdepot is not linked in. 
+SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() { + return nullptr; +} + void BackgroundThread(void *arg) { - uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb; - uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb; - bool heap_profile = common_flags()->heap_profile; + const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb; + const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb; + const bool heap_profile = common_flags()->heap_profile; uptr prev_reported_rss = 0; uptr prev_reported_stack_depot_size = 0; bool reached_soft_rss_limit = false; uptr rss_during_last_reported_profile = 0; while (true) { SleepForMillis(100); - uptr current_rss_mb = GetRSS() >> 20; + const uptr current_rss_mb = GetRSS() >> 20; if (Verbosity()) { // If RSS has grown 10% since last time, print some information. if (prev_reported_rss * 11 / 10 < current_rss_mb) { @@ -107,13 +50,15 @@ void BackgroundThread(void *arg) { } // If stack depot has grown 10% since last time, print it too. StackDepotStats *stack_depot_stats = StackDepotGetStats(); - if (prev_reported_stack_depot_size * 11 / 10 < - stack_depot_stats->allocated) { - Printf("%s: StackDepot: %zd ids; %zdM allocated\n", - SanitizerToolName, - stack_depot_stats->n_uniq_ids, - stack_depot_stats->allocated >> 20); - prev_reported_stack_depot_size = stack_depot_stats->allocated; + if (stack_depot_stats) { + if (prev_reported_stack_depot_size * 11 / 10 < + stack_depot_stats->allocated) { + Printf("%s: StackDepot: %zd ids; %zdM allocated\n", + SanitizerToolName, + stack_depot_stats->n_uniq_ids, + stack_depot_stats->allocated >> 20); + prev_reported_stack_depot_size = stack_depot_stats->allocated; + } } } // Check RSS against the limit. @@ -147,127 +92,6 @@ void BackgroundThread(void *arg) { } #endif -#if !SANITIZER_FUCHSIA && !SANITIZER_GO -void StartReportDeadlySignal() { - // Write the first message using fd=2, just in case. - // It may actually fail to write in case stderr is closed. 
- CatastrophicErrorWrite(SanitizerToolName, internal_strlen(SanitizerToolName)); - static const char kDeadlySignal[] = ":DEADLYSIGNAL\n"; - CatastrophicErrorWrite(kDeadlySignal, sizeof(kDeadlySignal) - 1); -} - -static void MaybeReportNonExecRegion(uptr pc) { -#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD - MemoryMappingLayout proc_maps(/*cache_enabled*/ true); - MemoryMappedSegment segment; - while (proc_maps.Next(&segment)) { - if (pc >= segment.start && pc < segment.end && !segment.IsExecutable()) - Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n"); - } -#endif -} - -static void PrintMemoryByte(InternalScopedString *str, const char *before, - u8 byte) { - SanitizerCommonDecorator d; - str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15, - d.Default()); -} - -static void MaybeDumpInstructionBytes(uptr pc) { - if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) - return; - InternalScopedString str(1024); - str.append("First 16 instruction bytes at pc: "); - if (IsAccessibleMemoryRange(pc, 16)) { - for (int i = 0; i < 16; ++i) { - PrintMemoryByte(&str, "", ((u8 *)pc)[i]); - } - str.append("\n"); - } else { - str.append("unaccessible\n"); - } - Report("%s", str.data()); -} - -static void MaybeDumpRegisters(void *context) { - if (!common_flags()->dump_registers) return; - SignalContext::DumpAllRegisters(context); -} - -static void ReportStackOverflowImpl(const SignalContext &sig, u32 tid, - UnwindSignalStackCallbackType unwind, - const void *unwind_context) { - SanitizerCommonDecorator d; - Printf("%s", d.Warning()); - static const char kDescription[] = "stack-overflow"; - Report("ERROR: %s: %s on address %p (pc %p bp %p sp %p T%d)\n", - SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc, - (void *)sig.bp, (void *)sig.sp, tid); - Printf("%s", d.Default()); - InternalScopedBuffer<BufferedStackTrace> stack_buffer(1); - BufferedStackTrace *stack = stack_buffer.data(); - 
stack->Reset(); - unwind(sig, unwind_context, stack); - stack->Print(); - ReportErrorSummary(kDescription, stack); -} - -static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid, - UnwindSignalStackCallbackType unwind, - const void *unwind_context) { - SanitizerCommonDecorator d; - Printf("%s", d.Warning()); - const char *description = sig.Describe(); - Report("ERROR: %s: %s on unknown address %p (pc %p bp %p sp %p T%d)\n", - SanitizerToolName, description, (void *)sig.addr, (void *)sig.pc, - (void *)sig.bp, (void *)sig.sp, tid); - Printf("%s", d.Default()); - if (sig.pc < GetPageSizeCached()) - Report("Hint: pc points to the zero page.\n"); - if (sig.is_memory_access) { - const char *access_type = - sig.write_flag == SignalContext::WRITE - ? "WRITE" - : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN"); - Report("The signal is caused by a %s memory access.\n", access_type); - if (sig.addr < GetPageSizeCached()) - Report("Hint: address points to the zero page.\n"); - } - MaybeReportNonExecRegion(sig.pc); - InternalScopedBuffer<BufferedStackTrace> stack_buffer(1); - BufferedStackTrace *stack = stack_buffer.data(); - stack->Reset(); - unwind(sig, unwind_context, stack); - stack->Print(); - MaybeDumpInstructionBytes(sig.pc); - MaybeDumpRegisters(sig.context); - Printf("%s can not provide additional info.\n", SanitizerToolName); - ReportErrorSummary(description, stack); -} - -void ReportDeadlySignal(const SignalContext &sig, u32 tid, - UnwindSignalStackCallbackType unwind, - const void *unwind_context) { - if (sig.IsStackOverflow()) - ReportStackOverflowImpl(sig, tid, unwind, unwind_context); - else - ReportDeadlySignalImpl(sig, tid, unwind, unwind_context); -} - -void HandleDeadlySignal(void *siginfo, void *context, u32 tid, - UnwindSignalStackCallbackType unwind, - const void *unwind_context) { - StartReportDeadlySignal(); - ScopedErrorReportLock rl; - SignalContext sig(siginfo, context); - ReportDeadlySignal(sig, tid, unwind, unwind_context); 
- Report("ABORTING\n"); - Die(); -} - -#endif // !SANITIZER_FUCHSIA && !SANITIZER_GO - void WriteToSyslog(const char *msg) { InternalScopedString msg_copy(kErrorMessageBufferSize); msg_copy.append("%s", msg); @@ -298,52 +122,16 @@ void MaybeStartBackgroudThread() { #endif } -static atomic_uintptr_t reporting_thread = {0}; -static StaticSpinMutex CommonSanitizerReportMutex; - -ScopedErrorReportLock::ScopedErrorReportLock() { - uptr current = GetThreadSelf(); - for (;;) { - uptr expected = 0; - if (atomic_compare_exchange_strong(&reporting_thread, &expected, current, - memory_order_relaxed)) { - // We've claimed reporting_thread so proceed. - CommonSanitizerReportMutex.Lock(); - return; - } - - if (expected == current) { - // This is either asynch signal or nested error during error reporting. - // Fail simple to avoid deadlocks in Report(). - - // Can't use Report() here because of potential deadlocks in nested - // signal handlers. - CatastrophicErrorWrite(SanitizerToolName, - internal_strlen(SanitizerToolName)); - static const char msg[] = ": nested bug in the same thread, aborting.\n"; - CatastrophicErrorWrite(msg, sizeof(msg) - 1); - - internal__exit(common_flags()->exitcode); - } - - internal_sched_yield(); - } -} - -ScopedErrorReportLock::~ScopedErrorReportLock() { - CommonSanitizerReportMutex.Unlock(); - atomic_store_relaxed(&reporting_thread, 0); -} - -void ScopedErrorReportLock::CheckLocked() { - CommonSanitizerReportMutex.CheckLocked(); +static void (*sandboxing_callback)(); +void SetSandboxingCallback(void (*f)()) { + sandboxing_callback = f; } } // namespace __sanitizer SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify, __sanitizer_sandbox_arguments *args) { - __sanitizer::PrepareForSandboxing(args); + __sanitizer::PlatformPrepareForSandboxing(args); if (__sanitizer::sandboxing_callback) __sanitizer::sandboxing_callback(); } diff --git a/lib/sanitizer_common/sanitizer_common_nolibc.cc b/lib/sanitizer_common/sanitizer_common_nolibc.cc index 
ba54c739a9e0..5d322907343e 100644 --- a/lib/sanitizer_common/sanitizer_common_nolibc.cc +++ b/lib/sanitizer_common/sanitizer_common_nolibc.cc @@ -21,7 +21,6 @@ namespace __sanitizer { // bypassing libc. #if !SANITIZER_WINDOWS #if SANITIZER_LINUX -bool ShouldLogAfterPrintf() { return false; } void LogMessageOnPrintf(const char *str) {} #endif void WriteToSyslog(const char *buffer) {} diff --git a/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc b/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc index c5be48bceace..b4ffcca5c30b 100644 --- a/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc +++ b/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc @@ -1,11 +1,11 @@ -//===-- sanitizer_coverage_fuchsia.cc ------------------------------------===// +//===-- sanitizer_coverage_fuchsia.cc -------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // -//===---------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// // // Sanitizer Coverage Controller for Trace PC Guard, Fuchsia-specific version. // @@ -49,7 +49,7 @@ constexpr const char kSancovSinkName[] = "sancov"; // Collects trace-pc guard coverage. // This class relies on zero-initialization. -class TracePcGuardController { +class TracePcGuardController final { public: // For each PC location being tracked, there is a u32 reserved in global // data called the "guard". At startup, we assign each guard slot a @@ -113,11 +113,11 @@ class TracePcGuardController { // We can always spare the 32G of address space. 
static constexpr size_t MappingSize = sizeof(uptr) << 32; - BlockingMutex setup_lock_; - uptr *array_; - u32 next_index_; - zx_handle_t vmo_; - char vmo_name_[ZX_MAX_NAME_LEN]; + BlockingMutex setup_lock_ = BlockingMutex(LINKER_INITIALIZED); + uptr *array_ = nullptr; + u32 next_index_ = 0; + zx_handle_t vmo_ = {}; + char vmo_name_[ZX_MAX_NAME_LEN] = {}; size_t DataSize() const { return next_index_ * sizeof(uintptr_t); } @@ -146,9 +146,9 @@ class TracePcGuardController { // indices, but we'll never move the mapping address so we don't have // any multi-thread synchronization issues with that. uintptr_t mapping; - status = - _zx_vmar_map(_zx_vmar_root_self(), 0, vmo_, 0, MappingSize, - ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &mapping); + status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo_, 0, MappingSize, + ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, + &mapping); CHECK_EQ(status, ZX_OK); // Hereafter other threads are free to start storing into diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc index 3c5f29b2899b..bea277587289 100644 --- a/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc +++ b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc @@ -16,7 +16,6 @@ #include "sanitizer_atomic.h" #include "sanitizer_common.h" #include "sanitizer_file.h" -#include "sanitizer_symbolizer.h" using namespace __sanitizer; @@ -64,7 +63,7 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) { uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr))); internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr)); - SortArray(pcs, len); + Sort(pcs, len); bool module_found = false; uptr last_base = 0; diff --git a/lib/sanitizer_common/sanitizer_deadlock_detector2.cc b/lib/sanitizer_common/sanitizer_deadlock_detector2.cc index 87d4a4d9a838..0b0085a48bd5 100644 --- a/lib/sanitizer_common/sanitizer_deadlock_detector2.cc +++ 
b/lib/sanitizer_common/sanitizer_deadlock_detector2.cc @@ -111,7 +111,7 @@ struct DD : public DDetector { SpinMutex mtx; InternalMmapVector<u32> free_id; - int id_gen; + int id_gen = 0; }; DDetector *DDetector::Create(const DDFlags *flags) { @@ -120,11 +120,7 @@ DDetector *DDetector::Create(const DDFlags *flags) { return new(mem) DD(flags); } -DD::DD(const DDFlags *flags) - : flags(*flags) - , free_id(1024) { - id_gen = 0; -} +DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); } DDPhysicalThread* DD::CreatePhysicalThread() { DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread), diff --git a/lib/sanitizer_common/sanitizer_errno.h b/lib/sanitizer_common/sanitizer_errno.h index 42cc290502bf..9322608aa8f5 100644 --- a/lib/sanitizer_common/sanitizer_errno.h +++ b/lib/sanitizer_common/sanitizer_errno.h @@ -24,7 +24,8 @@ #if SANITIZER_FREEBSD || SANITIZER_MAC # define __errno_location __error -#elif SANITIZER_ANDROID || SANITIZER_NETBSD +#elif SANITIZER_ANDROID || SANITIZER_NETBSD || SANITIZER_OPENBSD || \ + SANITIZER_RTEMS # define __errno_location __errno #elif SANITIZER_SOLARIS # define __errno_location ___errno diff --git a/lib/sanitizer_common/sanitizer_file.cc b/lib/sanitizer_common/sanitizer_file.cc index cde54bfde804..278d75c520ae 100644 --- a/lib/sanitizer_common/sanitizer_file.cc +++ b/lib/sanitizer_common/sanitizer_file.cc @@ -95,32 +95,40 @@ void ReportFile::SetReportPath(const char *path) { bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, uptr *read_len, uptr max_len, error_t *errno_p) { - uptr PageSize = GetPageSizeCached(); - uptr kMinFileLen = PageSize; *buff = nullptr; *buff_size = 0; *read_len = 0; + if (!max_len) + return true; + uptr PageSize = GetPageSizeCached(); + uptr kMinFileLen = Min(PageSize, max_len); + // The files we usually open are not seekable, so try different buffer sizes. 
- for (uptr size = kMinFileLen; size <= max_len; size *= 2) { - fd_t fd = OpenFile(file_name, RdOnly, errno_p); - if (fd == kInvalidFd) return false; + for (uptr size = kMinFileLen;; size = Min(size * 2, max_len)) { UnmapOrDie(*buff, *buff_size); *buff = (char*)MmapOrDie(size, __func__); *buff_size = size; + fd_t fd = OpenFile(file_name, RdOnly, errno_p); + if (fd == kInvalidFd) { + UnmapOrDie(*buff, *buff_size); + return false; + } *read_len = 0; // Read up to one page at a time. bool reached_eof = false; - while (*read_len + PageSize <= size) { + while (*read_len < size) { uptr just_read; - if (!ReadFromFile(fd, *buff + *read_len, PageSize, &just_read, errno_p)) { + if (!ReadFromFile(fd, *buff + *read_len, size - *read_len, &just_read, + errno_p)) { UnmapOrDie(*buff, *buff_size); + CloseFile(fd); return false; } - if (just_read == 0) { + *read_len += just_read; + if (just_read == 0 || *read_len == max_len) { reached_eof = true; break; } - *read_len += just_read; } CloseFile(fd); if (reached_eof) // We've read the whole file. @@ -129,6 +137,37 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, return true; } +bool ReadFileToVector(const char *file_name, + InternalMmapVectorNoCtor<char> *buff, uptr max_len, + error_t *errno_p) { + buff->clear(); + if (!max_len) + return true; + uptr PageSize = GetPageSizeCached(); + fd_t fd = OpenFile(file_name, RdOnly, errno_p); + if (fd == kInvalidFd) + return false; + uptr read_len = 0; + while (read_len < max_len) { + if (read_len >= buff->size()) + buff->resize(Min(Max(PageSize, read_len * 2), max_len)); + CHECK_LT(read_len, buff->size()); + CHECK_LE(buff->size(), max_len); + uptr just_read; + if (!ReadFromFile(fd, buff->data() + read_len, buff->size() - read_len, + &just_read, errno_p)) { + CloseFile(fd); + return false; + } + read_len += just_read; + if (!just_read) + break; + } + CloseFile(fd); + buff->resize(read_len); + return true; +} + static const char kPathSeparator = SANITIZER_WINDOWS ? 
';' : ':'; char *FindPathToBinary(const char *name) { @@ -140,7 +179,7 @@ char *FindPathToBinary(const char *name) { if (!path) return nullptr; uptr name_len = internal_strlen(name); - InternalScopedBuffer<char> buffer(kMaxPathLength); + InternalMmapVector<char> buffer(kMaxPathLength); const char *beg = path; while (true) { const char *end = internal_strchrnul(beg, kPathSeparator); diff --git a/lib/sanitizer_common/sanitizer_flag_parser.h b/lib/sanitizer_common/sanitizer_flag_parser.h index f649f5bffea7..705bfe23b309 100644 --- a/lib/sanitizer_common/sanitizer_flag_parser.h +++ b/lib/sanitizer_common/sanitizer_flag_parser.h @@ -81,7 +81,7 @@ inline bool FlagHandler<const char *>::Parse(const char *value) { template <> inline bool FlagHandler<int>::Parse(const char *value) { - char *value_end; + const char *value_end; *t_ = internal_simple_strtoll(value, &value_end, 10); bool ok = *value_end == 0; if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value); @@ -90,7 +90,7 @@ inline bool FlagHandler<int>::Parse(const char *value) { template <> inline bool FlagHandler<uptr>::Parse(const char *value) { - char *value_end; + const char *value_end; *t_ = internal_simple_strtoll(value, &value_end, 10); bool ok = *value_end == 0; if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value); diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc index 913ce3cb423e..4631dfb7120c 100644 --- a/lib/sanitizer_common/sanitizer_flags.cc +++ b/lib/sanitizer_common/sanitizer_flags.cc @@ -22,14 +22,6 @@ namespace __sanitizer { CommonFlags common_flags_dont_use; -struct FlagDescription { - const char *name; - const char *description; - FlagDescription *next; -}; - -IntrusiveList<FlagDescription> flag_descriptions; - void CommonFlags::SetDefaults() { #define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; #include "sanitizer_flags.inc" diff --git a/lib/sanitizer_common/sanitizer_flags.inc 
b/lib/sanitizer_common/sanitizer_flags.inc index 445e25f9df70..cfe8af893423 100644 --- a/lib/sanitizer_common/sanitizer_flags.inc +++ b/lib/sanitizer_common/sanitizer_flags.inc @@ -56,7 +56,7 @@ COMMON_FLAG( "Mention name of executable when reporting error and " "append executable name to logs (as in \"log_path.exe_name.pid\").") COMMON_FLAG( - bool, log_to_syslog, SANITIZER_ANDROID || SANITIZER_MAC, + bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC, "Write all sanitizer output to syslog in addition to other means of " "logging.") COMMON_FLAG( @@ -93,6 +93,8 @@ COMMON_FLAG(HandleSignalMode, handle_abort, kHandleSignalNo, COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGABRT)) COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo, COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGILL)) +COMMON_FLAG(HandleSignalMode, handle_sigtrap, kHandleSignalNo, + COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGTRAP)) COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes, COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE)) #undef COMMON_FLAG_HANDLE_SIGNAL_HELP @@ -132,7 +134,8 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0, " This limit does not affect memory allocations other than" " malloc/new.") COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only") -COMMON_FLAG(s32, allocator_release_to_os_interval_ms, 5000, +COMMON_FLAG(s32, allocator_release_to_os_interval_ms, + ((bool)SANITIZER_FUCHSIA || (bool)SANITIZER_WINDOWS) ? -1 : 5000, "Only affects a 64-bit allocator. If set, tries to release unused " "memory to the OS, but not more often than this interval (in " "milliseconds). 
Negative values mean do not attempt to release " @@ -222,7 +225,7 @@ COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer " COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool " "found an error") COMMON_FLAG( - bool, abort_on_error, SANITIZER_ANDROID || SANITIZER_MAC, + bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC, "If set, the tool calls abort() instead of _exit() after printing the " "error report.") COMMON_FLAG(bool, suppress_equal_pcs, true, @@ -237,3 +240,6 @@ COMMON_FLAG(bool, dump_instruction_bytes, false, COMMON_FLAG(bool, dump_registers, true, "If true, dump values of CPU registers when SEGV happens. Only " "available on OS X for now.") +COMMON_FLAG(bool, detect_write_exec, false, + "If true, triggers warning when writable-executable pages requests " + "are being made") diff --git a/lib/sanitizer_common/sanitizer_fuchsia.cc b/lib/sanitizer_common/sanitizer_fuchsia.cc index 936ec794b8e8..391620690f30 100644 --- a/lib/sanitizer_common/sanitizer_fuchsia.cc +++ b/lib/sanitizer_common/sanitizer_fuchsia.cc @@ -1,16 +1,16 @@ -//===-- sanitizer_fuchsia.cc ---------------------------------------------===// +//===-- sanitizer_fuchsia.cc ----------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // -//===---------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// // // This file is shared between AddressSanitizer and other sanitizer // run-time libraries and implements Fuchsia-specific functions from // sanitizer_common.h. 
-//===---------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// #include "sanitizer_fuchsia.h" #if SANITIZER_FUCHSIA @@ -18,19 +18,20 @@ #include "sanitizer_common.h" #include "sanitizer_libc.h" #include "sanitizer_mutex.h" -#include "sanitizer_stacktrace.h" #include <limits.h> #include <pthread.h> #include <stdlib.h> #include <unistd.h> -#include <unwind.h> #include <zircon/errors.h> #include <zircon/process.h> #include <zircon/syscalls.h> namespace __sanitizer { +// TODO(phosek): remove this and replace it with ZX_TIME_INFINITE +#define ZX_TIME_INFINITE_OLD INT64_MAX + void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); } uptr internal_sched_yield() { @@ -49,9 +50,9 @@ unsigned int internal_sleep(unsigned int seconds) { return 0; } -u64 NanoTime() { return _zx_time_get(ZX_CLOCK_UTC); } +u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); } -u64 MonotonicNanoTime() { return _zx_time_get(ZX_CLOCK_MONOTONIC); } +u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); } uptr internal_getpid() { zx_info_handle_basic_t info; @@ -66,7 +67,7 @@ uptr internal_getpid() { uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); } -uptr GetTid() { return GetThreadSelf(); } +tid_t GetTid() { return GetThreadSelf(); } void Abort() { abort(); } @@ -89,13 +90,10 @@ void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) { } void MaybeReexec() {} -void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {} +void CheckASLR() {} +void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {} void DisableCoreDumperIfNecessary() {} void InstallDeadlySignalHandlers(SignalHandlerType handler) {} -void StartReportDeadlySignal() {} -void ReportDeadlySignal(const SignalContext &sig, u32 tid, - UnwindSignalStackCallbackType unwind, - const void *unwind_context) {} void SetAlternateSignalStack() {} void 
UnsetAlternateSignalStack() {} void InitTlsSize() {} @@ -106,42 +104,6 @@ bool SignalContext::IsStackOverflow() const { return false; } void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); } const char *SignalContext::Describe() const { UNIMPLEMENTED(); } -struct UnwindTraceArg { - BufferedStackTrace *stack; - u32 max_depth; -}; - -_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { - UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param); - CHECK_LT(arg->stack->size, arg->max_depth); - uptr pc = _Unwind_GetIP(ctx); - if (pc < PAGE_SIZE) return _URC_NORMAL_STOP; - arg->stack->trace_buffer[arg->stack->size++] = pc; - return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP - : _URC_NO_REASON); -} - -void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) { - CHECK_GE(max_depth, 2); - size = 0; - UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)}; - _Unwind_Backtrace(Unwind_Trace, &arg); - CHECK_GT(size, 0); - // We need to pop a few frames so that pc is on top. - uptr to_pop = LocatePcInTrace(pc); - // trace_buffer[0] belongs to the current function so we always pop it, - // unless there is only 1 frame in the stack trace (1 frame is always better - // than 0!). - PopStackFrames(Min(to_pop, static_cast<uptr>(1))); - trace_buffer[0] = pc; -} - -void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, - u32 max_depth) { - CHECK_NE(context, nullptr); - UNREACHABLE("signal context doesn't exist"); -} - enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 }; BlockingMutex::BlockingMutex() { @@ -161,7 +123,7 @@ void BlockingMutex::Lock() { return; while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) { zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), - MtxSleeping, ZX_TIME_INFINITE); + MtxSleeping, ZX_TIME_INFINITE_OLD); if (status != ZX_ERR_BAD_STATE) // Normal race. 
CHECK_EQ(status, ZX_OK); } @@ -212,8 +174,9 @@ static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type, // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that? uintptr_t addr; - status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size, - ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr); + status = + _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, size, + ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr); _zx_handle_close(vmo); if (status != ZX_OK) { @@ -247,10 +210,10 @@ uptr ReservedAddressRange::Init(uptr init_size, const char *name, uintptr_t base; zx_handle_t vmar; zx_status_t status = - _zx_vmar_allocate(_zx_vmar_root_self(), 0, init_size, - ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE | - ZX_VM_FLAG_CAN_MAP_SPECIFIC, - &vmar, &base); + _zx_vmar_allocate_old(_zx_vmar_root_self(), 0, init_size, + ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE | + ZX_VM_FLAG_CAN_MAP_SPECIFIC, + &vmar, &base); if (status != ZX_OK) ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status); base_ = reinterpret_cast<void *>(base); @@ -272,11 +235,11 @@ static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size, ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status); return 0; } - _zx_object_set_property(vmo, ZX_PROP_NAME, name, sizeof(name) - 1); + _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name)); DCHECK_GE(base + size_, map_size + offset); uintptr_t addr; - status = _zx_vmar_map( + status = _zx_vmar_map_old( vmar, offset, vmo, 0, map_size, ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC, &addr); @@ -316,20 +279,16 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) { DecreaseTotalMmap(size); } -void ReservedAddressRange::Unmap(uptr fixed_addr, uptr size) { - uptr offset = fixed_addr - reinterpret_cast<uptr>(base_); - uptr addr = reinterpret_cast<uptr>(base_) + offset; - void *addr_as_void = reinterpret_cast<void *>(addr); - uptr 
base_as_uptr = reinterpret_cast<uptr>(base_); - // Only unmap at the beginning or end of the range. - CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_)); +void ReservedAddressRange::Unmap(uptr addr, uptr size) { CHECK_LE(size, size_); + if (addr == reinterpret_cast<uptr>(base_)) + // If we unmap the whole range, just null out the base. + base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size); + else + CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_); + size_ -= size; UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, static_cast<zx_handle_t>(os_handle_)); - if (addr_as_void == base_) { - base_ = reinterpret_cast<void *>(addr + size); - } - size_ = size_ - size; } // This should never be called. @@ -361,8 +320,9 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, // beginning of the VMO, and unmap the excess before and after. size_t map_size = size + alignment; uintptr_t addr; - status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size, - ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr); + status = + _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size, + ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr); if (status == ZX_OK) { uintptr_t map_addr = addr; uintptr_t map_end = map_addr + map_size; @@ -374,11 +334,11 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, sizeof(info), NULL, NULL); if (status == ZX_OK) { uintptr_t new_addr; - status = - _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size, - ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | - ZX_VM_FLAG_SPECIFIC_OVERWRITE, - &new_addr); + status = _zx_vmar_map_old(_zx_vmar_root_self(), addr - info.base, vmo, + 0, size, + ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | + ZX_VM_FLAG_SPECIFIC_OVERWRITE, + &new_addr); if (status == ZX_OK) CHECK_EQ(new_addr, addr); } } @@ -418,16 +378,7 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) { zx_handle_t vmo; zx_status_t status = _zx_vmo_create(size, 0, 
&vmo); if (status == ZX_OK) { - while (size > 0) { - size_t wrote; - status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size, - &wrote); - if (status != ZX_OK) break; - CHECK_GT(wrote, 0); - CHECK_LE(wrote, size); - beg += wrote; - size -= wrote; - } + status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size); _zx_handle_close(vmo); } return status == ZX_OK; @@ -447,8 +398,8 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, if (vmo_size < max_len) max_len = vmo_size; size_t map_size = RoundUpTo(max_len, PAGE_SIZE); uintptr_t addr; - status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size, - ZX_VM_FLAG_PERM_READ, &addr); + status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size, + ZX_VM_FLAG_PERM_READ, &addr); if (status == ZX_OK) { *buff = reinterpret_cast<char *>(addr); *buff_size = map_size; @@ -462,7 +413,31 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, } void RawWrite(const char *buffer) { - __sanitizer_log_write(buffer, internal_strlen(buffer)); + constexpr size_t size = 128; + static _Thread_local char line[size]; + static _Thread_local size_t lastLineEnd = 0; + static _Thread_local size_t cur = 0; + + while (*buffer) { + if (cur >= size) { + if (lastLineEnd == 0) + lastLineEnd = size; + __sanitizer_log_write(line, lastLineEnd); + internal_memmove(line, line + lastLineEnd, cur - lastLineEnd); + cur = cur - lastLineEnd; + lastLineEnd = 0; + } + if (*buffer == '\n') + lastLineEnd = cur + 1; + line[cur++] = *buffer++; + } + // Flush all complete lines before returning. 
+ if (lastLineEnd != 0) { + __sanitizer_log_write(line, lastLineEnd); + internal_memmove(line, line + lastLineEnd, cur - lastLineEnd); + cur = cur - lastLineEnd; + lastLineEnd = 0; + } } void CatastrophicErrorWrite(const char *buffer, uptr length) { @@ -486,8 +461,10 @@ const char *GetEnv(const char *name) { } uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) { - const char *argv0 = StoredArgv[0]; - if (!argv0) argv0 = "<UNKNOWN>"; + const char *argv0 = "<UNKNOWN>"; + if (StoredArgv && StoredArgv[0]) { + argv0 = StoredArgv[0]; + } internal_strncpy(buf, argv0, buf_len); return internal_strlen(buf); } @@ -500,9 +477,7 @@ uptr MainThreadStackBase, MainThreadStackSize; bool GetRandom(void *buffer, uptr length, bool blocking) { CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN); - size_t size; - CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK); - CHECK_EQ(size, length); + _zx_cprng_draw(buffer, length); return true; } diff --git a/lib/sanitizer_common/sanitizer_getauxval.h b/lib/sanitizer_common/sanitizer_getauxval.h index 934e311a7851..b22fcade5f9d 100644 --- a/lib/sanitizer_common/sanitizer_getauxval.h +++ b/lib/sanitizer_common/sanitizer_getauxval.h @@ -18,7 +18,7 @@ #include "sanitizer_platform.h" -#if SANITIZER_LINUX +#if SANITIZER_LINUX || SANITIZER_FUCHSIA # include <features.h> @@ -26,7 +26,8 @@ # define __GLIBC_PREREQ(x, y) 0 # endif -# if __GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) +# if __GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \ + SANITIZER_FUCHSIA # define SANITIZER_USE_GETAUXVAL 1 # else # define SANITIZER_USE_GETAUXVAL 0 @@ -42,6 +43,6 @@ extern "C" SANITIZER_WEAK_ATTRIBUTE unsigned long getauxval(unsigned long type); // NOLINT # endif -#endif // SANITIZER_LINUX +#endif // SANITIZER_LINUX || SANITIZER_FUCHSIA #endif // SANITIZER_GETAUXVAL_H diff --git a/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc b/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc new file mode 100644 index 
000000000000..9a2a9f120684 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc @@ -0,0 +1,1489 @@ +//===-- sanitizer_interceptors_ioctl_netbsd.inc -----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Ioctl handling in common sanitizer interceptors. +//===----------------------------------------------------------------------===// + +#if SANITIZER_NETBSD + +#include "sanitizer_flags.h" + +struct ioctl_desc { + unsigned req; + // FIXME: support read+write arguments. Currently READWRITE and WRITE do the + // same thing. + // XXX: The declarations below may use WRITE instead of READWRITE, unless + // explicitly noted. + enum { NONE, READ, WRITE, READWRITE, CUSTOM } type : 3; + unsigned size : 29; + const char *name; +}; + +const unsigned ioctl_table_max = 1198; +static ioctl_desc ioctl_table[ioctl_table_max]; +static unsigned ioctl_table_size = 0; + +// This can not be declared as a global, because references to struct_*_sz +// require a global initializer. And this table must be available before global +// initializers are run. 
+static void ioctl_table_fill() { +#define _(rq, tp, sz) \ + if (IOCTL_##rq != IOCTL_NOT_PRESENT) { \ + CHECK(ioctl_table_size < ioctl_table_max); \ + ioctl_table[ioctl_table_size].req = IOCTL_##rq; \ + ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \ + ioctl_table[ioctl_table_size].size = sz; \ + ioctl_table[ioctl_table_size].name = #rq; \ + ++ioctl_table_size; \ + } + + /* Entries from file: altq/altq_afmap.h */ + _(AFM_ADDFMAP, READWRITE, struct_atm_flowmap_sz); + _(AFM_DELFMAP, READWRITE, struct_atm_flowmap_sz); + _(AFM_CLEANFMAP, READWRITE, struct_atm_flowmap_sz); + _(AFM_GETFMAP, READWRITE, struct_atm_flowmap_sz); + /* Entries from file: altq/altq.h */ + _(ALTQGTYPE, READWRITE, struct_altqreq_sz); + _(ALTQTBRSET, READ, struct_tbrreq_sz); + _(ALTQTBRGET, READWRITE, struct_tbrreq_sz); + /* Entries from file: altq/altq_blue.h */ + _(BLUE_IF_ATTACH, READ, struct_blue_interface_sz); + _(BLUE_DISABLE, READ, struct_blue_interface_sz); + _(BLUE_CONFIG, READWRITE, struct_blue_conf_sz); + _(BLUE_GETSTATS, READWRITE, struct_blue_stats_sz); + /* Entries from file: altq/altq_cbq.h */ + _(CBQ_ENABLE, READ, struct_cbq_interface_sz); + _(CBQ_ADD_CLASS, READWRITE, struct_cbq_add_class_sz); + _(CBQ_DEL_CLASS, READ, struct_cbq_delete_class_sz); + _(CBQ_MODIFY_CLASS, READWRITE, struct_cbq_modify_class_sz); + _(CBQ_DEL_FILTER, READ, struct_cbq_delete_filter_sz); + _(CBQ_GETSTATS, READWRITE, struct_cbq_getstats_sz); + /* Entries from file: altq/altq_cdnr.h */ + _(CDNR_IF_DETACH, READ, struct_cdnr_interface_sz); + _(CDNR_ADD_FILTER, READWRITE, struct_cdnr_add_filter_sz); + _(CDNR_GETSTATS, READWRITE, struct_cdnr_get_stats_sz); + _(CDNR_ADD_ELEM, READWRITE, struct_cdnr_add_element_sz); + _(CDNR_DEL_ELEM, READ, struct_cdnr_delete_element_sz); + _(CDNR_ADD_TBM, READWRITE, struct_cdnr_add_tbmeter_sz); + _(CDNR_MOD_TBM, READ, struct_cdnr_modify_tbmeter_sz); + _(CDNR_TBM_STATS, READWRITE, struct_cdnr_tbmeter_stats_sz); + _(CDNR_ADD_TCM, READWRITE, struct_cdnr_add_trtcm_sz); + 
_(CDNR_MOD_TCM, READWRITE, struct_cdnr_modify_trtcm_sz); + _(CDNR_TCM_STATS, READWRITE, struct_cdnr_tcm_stats_sz); + _(CDNR_ADD_TSW, READWRITE, struct_cdnr_add_tswtcm_sz); + _(CDNR_MOD_TSW, READWRITE, struct_cdnr_modify_tswtcm_sz); + /* Entries from file: altq/altq_fifoq.h */ + _(FIFOQ_CONFIG, READWRITE, struct_fifoq_conf_sz); + _(FIFOQ_GETSTATS, READWRITE, struct_fifoq_getstats_sz); + /* Entries from file: altq/altq_hfsc.h */ + _(HFSC_CLEAR_HIERARCHY, READ, struct_hfsc_interface_sz); + _(HFSC_ADD_CLASS, READWRITE, struct_hfsc_add_class_sz); + _(HFSC_GETSTATS, READWRITE, struct_hfsc_class_stats_sz); + /* Entries from file: altq/altq_jobs.h */ + _(JOBS_IF_ATTACH, READ, struct_jobs_attach_sz); + _(JOBS_IF_DETACH, READ, struct_jobs_interface_sz); + _(JOBS_ENABLE, READ, struct_jobs_interface_sz); + _(JOBS_DISABLE, READ, struct_jobs_interface_sz); + _(JOBS_CLEAR, READ, struct_jobs_interface_sz); + _(JOBS_ADD_CLASS, READWRITE, struct_jobs_add_class_sz); + _(JOBS_MOD_CLASS, READ, struct_jobs_modify_class_sz); + /* Entries from file: altq/altq_priq.h */ + _(PRIQ_IF_ATTACH, READ, struct_priq_interface_sz); + _(PRIQ_CLEAR, READ, struct_priq_interface_sz); + _(PRIQ_ADD_CLASS, READWRITE, struct_priq_add_class_sz); + _(PRIQ_DEL_CLASS, READ, struct_priq_delete_class_sz); + _(PRIQ_MOD_CLASS, READ, struct_priq_modify_class_sz); + _(PRIQ_ADD_FILTER, READWRITE, struct_priq_add_filter_sz); + _(PRIQ_DEL_FILTER, READ, struct_priq_delete_filter_sz); + _(PRIQ_GETSTATS, READWRITE, struct_priq_class_stats_sz); + /* Entries from file: altq/altq_red.h */ + _(RED_CONFIG, READWRITE, struct_red_conf_sz); + _(RED_GETSTATS, READWRITE, struct_red_stats_sz); + _(RED_SETDEFAULTS, READ, struct_redparams_sz); + /* Entries from file: altq/altq_rio.h */ + _(RIO_CONFIG, READWRITE, struct_rio_conf_sz); + _(RIO_GETSTATS, READWRITE, struct_rio_stats_sz); + _(RIO_SETDEFAULTS, READ, struct_redparams_sz); + /* Entries from file: altq/altq_wfq.h */ + _(WFQ_CONFIG, READWRITE, struct_wfq_conf_sz); + 
_(WFQ_GET_QID, READWRITE, struct_wfq_getqid_sz); + _(WFQ_SET_WEIGHT, READWRITE, struct_wfq_setweight_sz); + /* Entries from file: crypto/cryptodev.h */ + _(CRIOGET, READWRITE, sizeof(u32)); + _(CIOCFSESSION, READ, sizeof(u32)); + _(CIOCKEY, READWRITE, struct_crypt_kop_sz); + _(CIOCNFKEYM, READWRITE, struct_crypt_mkop_sz); + _(CIOCNFSESSION, READ, struct_crypt_sfop_sz); + _(CIOCNCRYPTRETM, READWRITE, struct_cryptret_sz); + _(CIOCNCRYPTRET, READWRITE, struct_crypt_result_sz); + _(CIOCGSESSION, READWRITE, struct_session_op_sz); + _(CIOCNGSESSION, READWRITE, struct_crypt_sgop_sz); + _(CIOCCRYPT, READWRITE, struct_crypt_op_sz); + _(CIOCNCRYPTM, READWRITE, struct_crypt_mop_sz); + _(CIOCASYMFEAT, WRITE, sizeof(u32)); + /* Entries from file: dev/apm/apmio.h */ + _(APM_IOC_REJECT, READ, struct_apm_event_info_sz); + _(OAPM_IOC_GETPOWER, WRITE, struct_apm_power_info_sz); + _(APM_IOC_GETPOWER, READWRITE, struct_apm_power_info_sz); + _(APM_IOC_NEXTEVENT, WRITE, struct_apm_event_info_sz); + _(APM_IOC_DEV_CTL, READ, struct_apm_ctl_sz); + /* Entries from file: dev/dm/netbsd-dm.h */ + _(NETBSD_DM_IOCTL, READWRITE, struct_plistref_sz); + /* Entries from file: dev/dmover/dmover_io.h */ + _(DMIO_SETFUNC, READ, struct_dmio_setfunc_sz); + /* Entries from file: dev/dtv/dtvio_demux.h */ + _(DMX_START, NONE, 0); + _(DMX_STOP, NONE, 0); + _(DMX_SET_FILTER, READ, struct_dmx_sct_filter_params_sz); + _(DMX_SET_PES_FILTER, READ, struct_dmx_pes_filter_params_sz); + _(DMX_SET_BUFFER_SIZE, NONE, 0); + _(DMX_GET_STC, READWRITE, struct_dmx_stc_sz); + _(DMX_ADD_PID, READ, sizeof(u16)); + _(DMX_REMOVE_PID, READ, sizeof(u16)); + _(DMX_GET_CAPS, WRITE, struct_dmx_caps_sz); + _(DMX_SET_SOURCE, READ, enum_dmx_source_sz); + /* Entries from file: dev/dtv/dtvio_frontend.h */ + _(FE_READ_STATUS, WRITE, enum_fe_status_sz); + _(FE_READ_BER, WRITE, sizeof(u32)); + _(FE_READ_SNR, WRITE, sizeof(u16)); + _(FE_READ_SIGNAL_STRENGTH, WRITE, sizeof(u16)); + _(FE_READ_UNCORRECTED_BLOCKS, WRITE, sizeof(u32)); + 
_(FE_SET_FRONTEND, READWRITE, struct_dvb_frontend_parameters_sz); + _(FE_GET_FRONTEND, WRITE, struct_dvb_frontend_parameters_sz); + _(FE_GET_EVENT, WRITE, struct_dvb_frontend_event_sz); + _(FE_GET_INFO, WRITE, struct_dvb_frontend_info_sz); + _(FE_DISEQC_RESET_OVERLOAD, NONE, 0); + _(FE_DISEQC_SEND_MASTER_CMD, READ, struct_dvb_diseqc_master_cmd_sz); + _(FE_DISEQC_RECV_SLAVE_REPLY, WRITE, struct_dvb_diseqc_slave_reply_sz); + _(FE_DISEQC_SEND_BURST, READ, enum_fe_sec_mini_cmd_sz); + _(FE_SET_TONE, READ, enum_fe_sec_tone_mode_sz); + _(FE_SET_VOLTAGE, READ, enum_fe_sec_voltage_sz); + _(FE_ENABLE_HIGH_LNB_VOLTAGE, READ, sizeof(int)); + _(FE_SET_FRONTEND_TUNE_MODE, READ, sizeof(unsigned int)); + _(FE_DISHNETWORK_SEND_LEGACY_CMD, READ, sizeof(unsigned long)); + /* Entries from file: dev/filemon/filemon.h */ + _(FILEMON_SET_FD, READWRITE, sizeof(int)); + _(FILEMON_SET_PID, READWRITE, sizeof(int)); + /* Entries from file: dev/hdaudio/hdaudioio.h */ + _(HDAUDIO_FGRP_INFO, READWRITE, struct_plistref_sz); + _(HDAUDIO_FGRP_GETCONFIG, READWRITE, struct_plistref_sz); + _(HDAUDIO_FGRP_SETCONFIG, READWRITE, struct_plistref_sz); + _(HDAUDIO_FGRP_WIDGET_INFO, READWRITE, struct_plistref_sz); + _(HDAUDIO_FGRP_CODEC_INFO, READWRITE, struct_plistref_sz); + _(HDAUDIO_AFG_WIDGET_INFO, READWRITE, struct_plistref_sz); + _(HDAUDIO_AFG_CODEC_INFO, READWRITE, struct_plistref_sz); + /* Entries from file: dev/hdmicec/hdmicecio.h */ + _(CEC_GET_PHYS_ADDR, WRITE, sizeof(u16)); + _(CEC_GET_LOG_ADDRS, WRITE, sizeof(u16)); + _(CEC_SET_LOG_ADDRS, READ, sizeof(u16)); + _(CEC_GET_VENDOR_ID, WRITE, sizeof(u32)); + /* Entries from file: dev/hpc/hpcfbio.h */ + _(HPCFBIO_GCONF, READWRITE, struct_hpcfb_fbconf_sz); + _(HPCFBIO_SCONF, READ, struct_hpcfb_fbconf_sz); + _(HPCFBIO_GDSPCONF, READWRITE, struct_hpcfb_dspconf_sz); + _(HPCFBIO_SDSPCONF, READ, struct_hpcfb_dspconf_sz); + _(HPCFBIO_GOP, WRITE, struct_hpcfb_dsp_op_sz); + _(HPCFBIO_SOP, READWRITE, struct_hpcfb_dsp_op_sz); + /* Entries from file: 
dev/i2o/iopio.h */ + _(IOPIOCPT, READWRITE, struct_ioppt_sz); + _(IOPIOCGLCT, READWRITE, struct_iovec_sz); + _(IOPIOCGSTATUS, READWRITE, struct_iovec_sz); + _(IOPIOCRECONFIG, NONE, 0); + _(IOPIOCGTIDMAP, READWRITE, struct_iovec_sz); + /* Entries from file: dev/ic/athioctl.h */ + _(SIOCGATHSTATS, READWRITE, struct_ifreq_sz); + _(SIOCGATHDIAG, READWRITE, struct_ath_diag_sz); + /* Entries from file: dev/ic/bt8xx.h */ + _(METEORCAPTUR, READ, sizeof(int)); + _(METEORCAPFRM, READ, struct_meteor_capframe_sz); + _(METEORSETGEO, READ, struct_meteor_geomet_sz); + _(METEORGETGEO, WRITE, struct_meteor_geomet_sz); + _(METEORSTATUS, WRITE, sizeof(unsigned short)); + _(METEORSHUE, READ, sizeof(signed char)); + _(METEORGHUE, WRITE, sizeof(signed char)); + _(METEORSFMT, READ, sizeof(unsigned int)); + _(METEORGFMT, WRITE, sizeof(unsigned int)); + _(METEORSINPUT, READ, sizeof(unsigned int)); + _(METEORGINPUT, WRITE, sizeof(unsigned int)); + _(METEORSCHCV, READ, sizeof(unsigned char)); + _(METEORGCHCV, WRITE, sizeof(unsigned char)); + _(METEORSCOUNT, READ, struct_meteor_counts_sz); + _(METEORGCOUNT, WRITE, struct_meteor_counts_sz); + _(METEORSFPS, READ, sizeof(unsigned short)); + _(METEORGFPS, WRITE, sizeof(unsigned short)); + _(METEORSSIGNAL, READ, sizeof(unsigned int)); + _(METEORGSIGNAL, WRITE, sizeof(unsigned int)); + _(METEORSVIDEO, READ, struct_meteor_video_sz); + _(METEORGVIDEO, WRITE, struct_meteor_video_sz); + _(METEORSBRIG, READ, sizeof(unsigned char)); + _(METEORGBRIG, WRITE, sizeof(unsigned char)); + _(METEORSCSAT, READ, sizeof(unsigned char)); + _(METEORGCSAT, WRITE, sizeof(unsigned char)); + _(METEORSCONT, READ, sizeof(unsigned char)); + _(METEORGCONT, WRITE, sizeof(unsigned char)); + _(METEORSHWS, READ, sizeof(unsigned char)); + _(METEORGHWS, WRITE, sizeof(unsigned char)); + _(METEORSVWS, READ, sizeof(unsigned char)); + _(METEORGVWS, WRITE, sizeof(unsigned char)); + _(METEORSTS, READ, sizeof(unsigned char)); + _(METEORGTS, WRITE, sizeof(unsigned char)); + 
_(TVTUNER_SETCHNL, READ, sizeof(unsigned int)); + _(TVTUNER_GETCHNL, WRITE, sizeof(unsigned int)); + _(TVTUNER_SETTYPE, READ, sizeof(unsigned int)); + _(TVTUNER_GETTYPE, WRITE, sizeof(unsigned int)); + _(TVTUNER_GETSTATUS, WRITE, sizeof(unsigned int)); + _(TVTUNER_SETFREQ, READ, sizeof(unsigned int)); + _(TVTUNER_GETFREQ, WRITE, sizeof(unsigned int)); + _(TVTUNER_SETAFC, READ, sizeof(int)); + _(TVTUNER_GETAFC, WRITE, sizeof(int)); + _(RADIO_SETMODE, READ, sizeof(unsigned int)); + _(RADIO_GETMODE, WRITE, sizeof(unsigned char)); + _(RADIO_SETFREQ, READ, sizeof(unsigned int)); + _(RADIO_GETFREQ, WRITE, sizeof(unsigned int)); + _(METEORSACTPIXFMT, READ, sizeof(int)); + _(METEORGACTPIXFMT, WRITE, sizeof(int)); + _(METEORGSUPPIXFMT, READWRITE, struct_meteor_pixfmt_sz); + _(TVTUNER_GETCHNLSET, READWRITE, struct_bktr_chnlset_sz); + _(REMOTE_GETKEY, WRITE, struct_bktr_remote_sz); + /* Entries from file: dev/ic/icp_ioctl.h */ + _(GDT_IOCTL_GENERAL, READWRITE, struct_gdt_ucmd_sz); + _(GDT_IOCTL_DRVERS, WRITE, sizeof(int)); + _(GDT_IOCTL_CTRTYPE, READWRITE, struct_gdt_ctrt_sz); + _(GDT_IOCTL_OSVERS, WRITE, struct_gdt_osv_sz); + _(GDT_IOCTL_CTRCNT, WRITE, sizeof(int)); + _(GDT_IOCTL_EVENT, READWRITE, struct_gdt_event_sz); + _(GDT_IOCTL_STATIST, WRITE, struct_gdt_statist_sz); + _(GDT_IOCTL_RESCAN, READWRITE, struct_gdt_rescan_sz); + /* Entries from file: dev/ic/isp_ioctl.h */ + _(ISP_SDBLEV, READWRITE, sizeof(int)); + _(ISP_RESETHBA, NONE, 0); + _(ISP_RESCAN, NONE, 0); + _(ISP_SETROLE, READWRITE, sizeof(int)); + _(ISP_GETROLE, WRITE, sizeof(int)); + _(ISP_GET_STATS, WRITE, struct_isp_stats_sz); + _(ISP_CLR_STATS, NONE, 0); + _(ISP_FC_LIP, NONE, 0); + _(ISP_FC_GETDINFO, READWRITE, struct_isp_fc_device_sz); + _(ISP_GET_FW_CRASH_DUMP, NONE, 0); + _(ISP_FORCE_CRASH_DUMP, NONE, 0); + _(ISP_FC_GETHINFO, READWRITE, struct_isp_hba_device_sz); + _(ISP_TSK_MGMT, READWRITE, struct_isp_fc_tsk_mgmt_sz); + _(ISP_FC_GETDLIST, NONE, 0); + /* Entries from file: dev/ic/mlxio.h */ + _(MLXD_STATUS, 
WRITE, sizeof(int)); + _(MLXD_CHECKASYNC, WRITE, sizeof(int)); + _(MLXD_DETACH, READ, sizeof(int)); + _(MLX_RESCAN_DRIVES, NONE, 0); + _(MLX_PAUSE_CHANNEL, READ, struct_mlx_pause_sz); + _(MLX_COMMAND, READWRITE, struct_mlx_usercommand_sz); + _(MLX_REBUILDASYNC, READWRITE, struct_mlx_rebuild_request_sz); + _(MLX_REBUILDSTAT, WRITE, struct_mlx_rebuild_status_sz); + _(MLX_GET_SYSDRIVE, READWRITE, sizeof(int)); + _(MLX_GET_CINFO, WRITE, struct_mlx_cinfo_sz); + /* Entries from file: dev/ic/nvmeio.h */ + _(NVME_PASSTHROUGH_CMD, READWRITE, struct_nvme_pt_command_sz); + /* Entries from file: dev/ir/irdaio.h */ + _(IRDA_RESET_PARAMS, NONE, 0); + _(IRDA_SET_PARAMS, READ, struct_irda_params_sz); + _(IRDA_GET_SPEEDMASK, WRITE, sizeof(unsigned int)); + _(IRDA_GET_TURNAROUNDMASK, WRITE, sizeof(unsigned int)); + _(IRFRAMETTY_GET_DEVICE, WRITE, sizeof(unsigned int)); + _(IRFRAMETTY_GET_DONGLE, WRITE, sizeof(unsigned int)); + _(IRFRAMETTY_SET_DONGLE, READ, sizeof(unsigned int)); + /* Entries from file: dev/isa/satlinkio.h */ + _(SATIORESET, NONE, 0); + _(SATIOGID, WRITE, struct_satlink_id_sz); + /* Entries from file: dev/isa/isvio.h */ + _(ISV_CMD, READWRITE, struct_isv_cmd_sz); + /* Entries from file: dev/isa/wtreg.h */ + _(WTQICMD, NONE, 0); + /* Entries from file: dev/iscsi/iscsi_ioctl.h */ + _(ISCSI_GET_VERSION, READWRITE, struct_iscsi_get_version_parameters_sz); + _(ISCSI_LOGIN, READWRITE, struct_iscsi_login_parameters_sz); + _(ISCSI_LOGOUT, READWRITE, struct_iscsi_logout_parameters_sz); + _(ISCSI_ADD_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz); + _(ISCSI_RESTORE_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz); + _(ISCSI_REMOVE_CONNECTION, READWRITE, struct_iscsi_remove_parameters_sz); + _(ISCSI_CONNECTION_STATUS, READWRITE, struct_iscsi_conn_status_parameters_sz); + _(ISCSI_SEND_TARGETS, READWRITE, struct_iscsi_send_targets_parameters_sz); + _(ISCSI_SET_NODE_NAME, READWRITE, struct_iscsi_set_node_name_parameters_sz); + _(ISCSI_IO_COMMAND, READWRITE, 
struct_iscsi_iocommand_parameters_sz); + _(ISCSI_REGISTER_EVENT, READWRITE, struct_iscsi_register_event_parameters_sz); + _(ISCSI_DEREGISTER_EVENT, READWRITE, + struct_iscsi_register_event_parameters_sz); + _(ISCSI_WAIT_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz); + _(ISCSI_POLL_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz); + /* Entries from file: dev/ofw/openfirmio.h */ + _(OFIOCGET, READWRITE, struct_ofiocdesc_sz); + _(OFIOCSET, READ, struct_ofiocdesc_sz); + _(OFIOCNEXTPROP, READWRITE, struct_ofiocdesc_sz); + _(OFIOCGETOPTNODE, WRITE, sizeof(int)); + _(OFIOCGETNEXT, READWRITE, sizeof(int)); + _(OFIOCGETCHILD, READWRITE, sizeof(int)); + _(OFIOCFINDDEVICE, READWRITE, struct_ofiocdesc_sz); + /* Entries from file: dev/pci/amrio.h */ + _(AMR_IO_VERSION, WRITE, sizeof(int)); + _(AMR_IO_COMMAND, READWRITE, struct_amr_user_ioctl_sz); + /* Entries from file: dev/pci/mlyio.h */ + _(MLYIO_COMMAND, READWRITE, struct_mly_user_command_sz); + _(MLYIO_HEALTH, READ, struct_mly_user_health_sz); + /* Entries from file: dev/pci/pciio.h */ + _(PCI_IOC_CFGREAD, READWRITE, struct_pciio_cfgreg_sz); + _(PCI_IOC_CFGWRITE, READ, struct_pciio_cfgreg_sz); + _(PCI_IOC_BDF_CFGREAD, READWRITE, struct_pciio_bdf_cfgreg_sz); + _(PCI_IOC_BDF_CFGWRITE, READ, struct_pciio_bdf_cfgreg_sz); + _(PCI_IOC_BUSINFO, WRITE, struct_pciio_businfo_sz); + _(PCI_IOC_DRVNAME, READWRITE, struct_pciio_drvname_sz); + _(PCI_IOC_DRVNAMEONBUS, READWRITE, struct_pciio_drvnameonbus_sz); + /* Entries from file: dev/pci/tweio.h */ + _(TWEIO_COMMAND, READWRITE, struct_twe_usercommand_sz); + _(TWEIO_STATS, READWRITE, union_twe_statrequest_sz); + _(TWEIO_AEN_POLL, WRITE, sizeof(int)); + _(TWEIO_AEN_WAIT, WRITE, sizeof(int)); + _(TWEIO_SET_PARAM, READ, struct_twe_paramcommand_sz); + _(TWEIO_GET_PARAM, READ, struct_twe_paramcommand_sz); + _(TWEIO_RESET, NONE, 0); + _(TWEIO_ADD_UNIT, READ, struct_twe_drivecommand_sz); + _(TWEIO_DEL_UNIT, READ, struct_twe_drivecommand_sz); + /* Entries from file: 
dev/pcmcia/if_cnwioctl.h */ + _(SIOCSCNWDOMAIN, READ, struct_ifreq_sz); + _(SIOCGCNWDOMAIN, READWRITE, struct_ifreq_sz); + _(SIOCSCNWKEY, READWRITE, struct_ifreq_sz); + _(SIOCGCNWSTATUS, READWRITE, struct_cnwstatus_sz); + _(SIOCGCNWSTATS, READWRITE, struct_cnwistats_sz); + _(SIOCGCNWTRAIL, READWRITE, struct_cnwitrail_sz); + /* Entries from file: dev/pcmcia/if_rayreg.h */ + _(SIOCGRAYSIGLEV, READWRITE, struct_ifreq_sz); + /* Entries from file: dev/raidframe/raidframeio.h */ + _(RAIDFRAME_SHUTDOWN, NONE, 0); + _(RAIDFRAME_TUR, READ, sizeof(u64)); + _(RAIDFRAME_FAIL_DISK, READ, struct_rf_recon_req_sz); + _(RAIDFRAME_CHECK_RECON_STATUS, READWRITE, sizeof(int)); + _(RAIDFRAME_REWRITEPARITY, NONE, 0); + _(RAIDFRAME_COPYBACK, NONE, 0); + _(RAIDFRAME_SPARET_WAIT, WRITE, struct_RF_SparetWait_sz); + _(RAIDFRAME_SEND_SPARET, READ, sizeof(uptr)); + _(RAIDFRAME_ABORT_SPARET_WAIT, NONE, 0); + _(RAIDFRAME_START_ATRACE, NONE, 0); + _(RAIDFRAME_STOP_ATRACE, NONE, 0); + _(RAIDFRAME_GET_SIZE, WRITE, sizeof(int)); + _(RAIDFRAME_RESET_ACCTOTALS, NONE, 0); + _(RAIDFRAME_KEEP_ACCTOTALS, READ, sizeof(int)); + _(RAIDFRAME_GET_COMPONENT_LABEL, READWRITE, struct_RF_ComponentLabel_sz); + _(RAIDFRAME_SET_COMPONENT_LABEL, READ, struct_RF_ComponentLabel_sz); + _(RAIDFRAME_INIT_LABELS, READ, struct_RF_ComponentLabel_sz); + _(RAIDFRAME_ADD_HOT_SPARE, READ, struct_RF_SingleComponent_sz); + _(RAIDFRAME_REMOVE_HOT_SPARE, READ, struct_RF_SingleComponent_sz); + _(RAIDFRAME_REBUILD_IN_PLACE, READ, struct_RF_SingleComponent_sz); + _(RAIDFRAME_CHECK_PARITY, READWRITE, sizeof(int)); + _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS, READWRITE, sizeof(int)); + _(RAIDFRAME_CHECK_COPYBACK_STATUS, READWRITE, sizeof(int)); + _(RAIDFRAME_SET_AUTOCONFIG, READWRITE, sizeof(int)); + _(RAIDFRAME_SET_ROOT, READWRITE, sizeof(int)); + _(RAIDFRAME_DELETE_COMPONENT, READ, struct_RF_SingleComponent_sz); + _(RAIDFRAME_INCORPORATE_HOT_SPARE, READ, struct_RF_SingleComponent_sz); + _(RAIDFRAME_CHECK_RECON_STATUS_EXT, READWRITE, 
struct_RF_ProgressInfo_sz); + _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT, READWRITE, + struct_RF_ProgressInfo_sz); + _(RAIDFRAME_CHECK_COPYBACK_STATUS_EXT, READWRITE, struct_RF_ProgressInfo_sz); + _(RAIDFRAME_PARITYMAP_STATUS, WRITE, struct_rf_pmstat_sz); + _(RAIDFRAME_PARITYMAP_GET_DISABLE, WRITE, sizeof(int)); + _(RAIDFRAME_PARITYMAP_SET_DISABLE, READ, sizeof(int)); + _(RAIDFRAME_PARITYMAP_SET_PARAMS, READ, struct_rf_pmparams_sz); + _(RAIDFRAME_SET_LAST_UNIT, READ, sizeof(int)); + _(RAIDFRAME_GET_INFO, READWRITE, sizeof(uptr)); + _(RAIDFRAME_CONFIGURE, READ, sizeof(uptr)); + /* Entries from file: dev/sbus/mbppio.h */ + _(MBPPIOCSPARAM, READ, struct_mbpp_param_sz); + _(MBPPIOCGPARAM, WRITE, struct_mbpp_param_sz); + _(MBPPIOCGSTAT, WRITE, sizeof(int)); + /* Entries from file: dev/scsipi/ses.h */ + _(SESIOC_GETNOBJ, NONE, 0); + _(SESIOC_GETOBJMAP, NONE, 0); + _(SESIOC_GETENCSTAT, NONE, 0); + _(SESIOC_SETENCSTAT, NONE, 0); + _(SESIOC_GETOBJSTAT, NONE, 0); + _(SESIOC_SETOBJSTAT, NONE, 0); + _(SESIOC_GETTEXT, NONE, 0); + _(SESIOC_INIT, NONE, 0); + /* Entries from file: dev/sun/disklabel.h */ + _(SUN_DKIOCGGEOM, WRITE, struct_sun_dkgeom_sz); + _(SUN_DKIOCINFO, WRITE, struct_sun_dkctlr_sz); + _(SUN_DKIOCGPART, WRITE, struct_sun_dkpart_sz); + /* Entries from file: dev/sun/fbio.h */ + _(FBIOGTYPE, WRITE, struct_fbtype_sz); + _(FBIOPUTCMAP, READ, struct_fbcmap_sz); + _(FBIOGETCMAP, READ, struct_fbcmap_sz); + _(FBIOGATTR, WRITE, struct_fbgattr_sz); + _(FBIOSVIDEO, READ, sizeof(int)); + _(FBIOGVIDEO, WRITE, sizeof(int)); + _(FBIOSCURSOR, READ, struct_fbcursor_sz); + _(FBIOGCURSOR, READWRITE, struct_fbcursor_sz); + _(FBIOSCURPOS, READ, struct_fbcurpos_sz); + _(FBIOGCURPOS, READ, struct_fbcurpos_sz); + _(FBIOGCURMAX, WRITE, struct_fbcurpos_sz); + /* Entries from file: dev/sun/kbio.h */ + _(KIOCTRANS, READ, sizeof(int)); + _(KIOCSETKEY, READWRITE, struct_okiockey_sz); + _(KIOCGETKEY, READWRITE, struct_okiockey_sz); + _(KIOCGTRANS, WRITE, sizeof(int)); + _(KIOCCMD, READ, 
sizeof(int)); + _(KIOCTYPE, WRITE, sizeof(int)); + _(KIOCSDIRECT, READ, sizeof(int)); + _(KIOCSKEY, READ, struct_kiockeymap_sz); + _(KIOCGKEY, READWRITE, struct_kiockeymap_sz); + _(KIOCSLED, READ, sizeof(char)); + _(KIOCGLED, WRITE, sizeof(char)); + _(KIOCLAYOUT, WRITE, sizeof(int)); + /* Entries from file: dev/sun/vuid_event.h */ + _(VUIDSFORMAT, READ, sizeof(int)); + _(VUIDGFORMAT, WRITE, sizeof(int)); + /* Entries from file: dev/tc/sticio.h */ + _(STICIO_GXINFO, WRITE, struct_stic_xinfo_sz); + _(STICIO_RESET, NONE, 0); + _(STICIO_STARTQ, NONE, 0); + _(STICIO_STOPQ, NONE, 0); + /* Entries from file: dev/usb/ukyopon.h */ + _(UKYOPON_IDENTIFY, WRITE, struct_ukyopon_identify_sz); + /* Entries from file: dev/usb/urio.h */ + _(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz); + _(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz); + /* Entries from file: dev/usb/usb.h */ + _(USB_REQUEST, READWRITE, struct_usb_ctl_request_sz); + _(USB_SETDEBUG, READ, sizeof(int)); + _(USB_DISCOVER, NONE, 0); + _(USB_DEVICEINFO, READWRITE, struct_usb_device_info_sz); + _(USB_DEVICEINFO_OLD, READWRITE, struct_usb_device_info_old_sz); + _(USB_DEVICESTATS, WRITE, struct_usb_device_stats_sz); + _(USB_GET_REPORT_DESC, WRITE, struct_usb_ctl_report_desc_sz); + _(USB_SET_IMMED, READ, sizeof(int)); + _(USB_GET_REPORT, READWRITE, struct_usb_ctl_report_sz); + _(USB_SET_REPORT, READ, struct_usb_ctl_report_sz); + _(USB_GET_REPORT_ID, WRITE, sizeof(int)); + _(USB_GET_CONFIG, WRITE, sizeof(int)); + _(USB_SET_CONFIG, READ, sizeof(int)); + _(USB_GET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz); + _(USB_SET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz); + _(USB_GET_NO_ALT, READWRITE, struct_usb_alt_interface_sz); + _(USB_GET_DEVICE_DESC, WRITE, struct_usb_device_descriptor_sz); + _(USB_GET_CONFIG_DESC, READWRITE, struct_usb_config_desc_sz); + _(USB_GET_INTERFACE_DESC, READWRITE, struct_usb_interface_desc_sz); + _(USB_GET_ENDPOINT_DESC, READWRITE, struct_usb_endpoint_desc_sz); + 
_(USB_GET_FULL_DESC, READWRITE, struct_usb_full_desc_sz); + _(USB_GET_STRING_DESC, READWRITE, struct_usb_string_desc_sz); + _(USB_DO_REQUEST, READWRITE, struct_usb_ctl_request_sz); + _(USB_GET_DEVICEINFO, WRITE, struct_usb_device_info_sz); + _(USB_GET_DEVICEINFO_OLD, WRITE, struct_usb_device_info_old_sz); + _(USB_SET_SHORT_XFER, READ, sizeof(int)); + _(USB_SET_TIMEOUT, READ, sizeof(int)); + _(USB_SET_BULK_RA, READ, sizeof(int)); + _(USB_SET_BULK_WB, READ, sizeof(int)); + _(USB_SET_BULK_RA_OPT, READ, struct_usb_bulk_ra_wb_opt_sz); + _(USB_SET_BULK_WB_OPT, READ, struct_usb_bulk_ra_wb_opt_sz); + _(USB_GET_CM_OVER_DATA, WRITE, sizeof(int)); + _(USB_SET_CM_OVER_DATA, READ, sizeof(int)); + /* Entries from file: dev/usb/utoppy.h */ + _(UTOPPYIOTURBO, READ, sizeof(int)); + _(UTOPPYIOREBOOT, NONE, 0); + _(UTOPPYIOSTATS, WRITE, struct_utoppy_stats_sz); + _(UTOPPYIORENAME, READ, struct_utoppy_rename_sz); + _(UTOPPYIOMKDIR, READ, sizeof(uptr)); + _(UTOPPYIODELETE, READ, sizeof(uptr)); + _(UTOPPYIOREADDIR, READ, sizeof(uptr)); + _(UTOPPYIOREADFILE, READ, struct_utoppy_readfile_sz); + _(UTOPPYIOWRITEFILE, READ, struct_utoppy_writefile_sz); + /* Entries from file: dev/vme/xio.h */ + _(DIOSXDCMD, READWRITE, struct_xd_iocmd_sz); + /* Entries from file: dev/wscons/wsdisplay_usl_io.h */ + _(VT_OPENQRY, WRITE, sizeof(int)); + _(VT_SETMODE, READ, struct_vt_mode_sz); + _(VT_GETMODE, WRITE, struct_vt_mode_sz); + _(VT_RELDISP, NONE, 0); + _(VT_ACTIVATE, NONE, 0); + _(VT_WAITACTIVE, NONE, 0); + _(VT_GETACTIVE, WRITE, sizeof(int)); + _(VT_GETSTATE, WRITE, struct_vt_stat_sz); + _(KDGETKBENT, READWRITE, struct_kbentry_sz); + _(KDGKBMODE, WRITE, sizeof(int)); + _(KDSKBMODE, NONE, 0); + _(KDMKTONE, NONE, 0); + _(KDSETMODE, NONE, 0); + _(KDENABIO, NONE, 0); + _(KDDISABIO, NONE, 0); + _(KDGKBTYPE, WRITE, sizeof(char)); + _(KDGETLED, WRITE, sizeof(int)); + _(KDSETLED, NONE, 0); + _(KDSETRAD, NONE, 0); + _(VGAPCVTID, READWRITE, struct_pcvtid_sz); + _(CONS_GETVERS, WRITE, sizeof(int)); + /* Entries 
from file: dev/wscons/wsconsio.h */ + _(WSKBDIO_GTYPE, WRITE, sizeof(unsigned int)); + _(WSKBDIO_BELL, NONE, 0); + _(WSKBDIO_COMPLEXBELL, READ, struct_wskbd_bell_data_sz); + _(WSKBDIO_SETBELL, READ, struct_wskbd_bell_data_sz); + _(WSKBDIO_GETBELL, WRITE, struct_wskbd_bell_data_sz); + _(WSKBDIO_SETDEFAULTBELL, READ, struct_wskbd_bell_data_sz); + _(WSKBDIO_GETDEFAULTBELL, WRITE, struct_wskbd_bell_data_sz); + _(WSKBDIO_SETKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz); + _(WSKBDIO_GETKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz); + _(WSKBDIO_SETDEFAULTKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz); + _(WSKBDIO_GETDEFAULTKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz); + _(WSKBDIO_SETLEDS, READ, sizeof(int)); + _(WSKBDIO_GETLEDS, WRITE, sizeof(int)); + _(WSKBDIO_GETMAP, READWRITE, struct_wskbd_map_data_sz); + _(WSKBDIO_SETMAP, READ, struct_wskbd_map_data_sz); + _(WSKBDIO_GETENCODING, WRITE, sizeof(int)); + _(WSKBDIO_SETENCODING, READ, sizeof(int)); + _(WSKBDIO_SETMODE, READ, sizeof(int)); + _(WSKBDIO_GETMODE, WRITE, sizeof(int)); + _(WSKBDIO_SETKEYCLICK, READ, sizeof(int)); + _(WSKBDIO_GETKEYCLICK, WRITE, sizeof(int)); + _(WSKBDIO_GETSCROLL, WRITE, struct_wskbd_scroll_data_sz); + _(WSKBDIO_SETSCROLL, READ, struct_wskbd_scroll_data_sz); + _(WSKBDIO_SETVERSION, READ, sizeof(int)); + _(WSMOUSEIO_GTYPE, WRITE, sizeof(unsigned int)); + _(WSMOUSEIO_SRES, READ, sizeof(unsigned int)); + _(WSMOUSEIO_SSCALE, READ, sizeof(unsigned int)); + _(WSMOUSEIO_SRATE, READ, sizeof(unsigned int)); + _(WSMOUSEIO_SCALIBCOORDS, READ, struct_wsmouse_calibcoords_sz); + _(WSMOUSEIO_GCALIBCOORDS, WRITE, struct_wsmouse_calibcoords_sz); + _(WSMOUSEIO_GETID, READWRITE, struct_wsmouse_id_sz); + _(WSMOUSEIO_GETREPEAT, WRITE, struct_wsmouse_repeat_sz); + _(WSMOUSEIO_SETREPEAT, READ, struct_wsmouse_repeat_sz); + _(WSMOUSEIO_SETVERSION, READ, sizeof(int)); + _(WSDISPLAYIO_GTYPE, WRITE, sizeof(unsigned int)); + _(WSDISPLAYIO_GINFO, WRITE, struct_wsdisplay_fbinfo_sz); + _(WSDISPLAYIO_GETCMAP, 
READ, struct_wsdisplay_cmap_sz); + _(WSDISPLAYIO_PUTCMAP, READ, struct_wsdisplay_cmap_sz); + _(WSDISPLAYIO_GVIDEO, WRITE, sizeof(unsigned int)); + _(WSDISPLAYIO_SVIDEO, READ, sizeof(unsigned int)); + _(WSDISPLAYIO_GCURPOS, WRITE, struct_wsdisplay_curpos_sz); + _(WSDISPLAYIO_SCURPOS, READ, struct_wsdisplay_curpos_sz); + _(WSDISPLAYIO_GCURMAX, WRITE, struct_wsdisplay_curpos_sz); + _(WSDISPLAYIO_GCURSOR, READWRITE, struct_wsdisplay_cursor_sz); + _(WSDISPLAYIO_SCURSOR, READ, struct_wsdisplay_cursor_sz); + _(WSDISPLAYIO_GMODE, WRITE, sizeof(unsigned int)); + _(WSDISPLAYIO_SMODE, READ, sizeof(unsigned int)); + _(WSDISPLAYIO_LDFONT, READ, struct_wsdisplay_font_sz); + _(WSDISPLAYIO_ADDSCREEN, READ, struct_wsdisplay_addscreendata_sz); + _(WSDISPLAYIO_DELSCREEN, READ, struct_wsdisplay_delscreendata_sz); + _(WSDISPLAYIO_SFONT, READ, struct_wsdisplay_usefontdata_sz); + _(_O_WSDISPLAYIO_SETKEYBOARD, READWRITE, struct_wsdisplay_kbddata_sz); + _(WSDISPLAYIO_GETPARAM, READWRITE, struct_wsdisplay_param_sz); + _(WSDISPLAYIO_SETPARAM, READWRITE, struct_wsdisplay_param_sz); + _(WSDISPLAYIO_GETACTIVESCREEN, WRITE, sizeof(int)); + _(WSDISPLAYIO_GETWSCHAR, READWRITE, struct_wsdisplay_char_sz); + _(WSDISPLAYIO_PUTWSCHAR, READWRITE, struct_wsdisplay_char_sz); + _(WSDISPLAYIO_DGSCROLL, WRITE, struct_wsdisplay_scroll_data_sz); + _(WSDISPLAYIO_DSSCROLL, READ, struct_wsdisplay_scroll_data_sz); + _(WSDISPLAYIO_GMSGATTRS, WRITE, struct_wsdisplay_msgattrs_sz); + _(WSDISPLAYIO_SMSGATTRS, READ, struct_wsdisplay_msgattrs_sz); + _(WSDISPLAYIO_GBORDER, WRITE, sizeof(int)); + _(WSDISPLAYIO_SBORDER, READ, sizeof(int)); + _(WSDISPLAYIO_SSPLASH, READ, sizeof(int)); + _(WSDISPLAYIO_SPROGRESS, READ, sizeof(int)); + _(WSDISPLAYIO_LINEBYTES, WRITE, sizeof(unsigned int)); + _(WSDISPLAYIO_SETVERSION, READ, sizeof(int)); + _(WSMUXIO_ADD_DEVICE, READ, struct_wsmux_device_sz); + _(WSMUXIO_REMOVE_DEVICE, READ, struct_wsmux_device_sz); + _(WSMUXIO_LIST_DEVICES, READWRITE, struct_wsmux_device_list_sz); + 
_(WSMUXIO_INJECTEVENT, READ, struct_wscons_event_sz); + _(WSDISPLAYIO_GET_BUSID, WRITE, struct_wsdisplayio_bus_id_sz); + _(WSDISPLAYIO_GET_EDID, READWRITE, struct_wsdisplayio_edid_info_sz); + _(WSDISPLAYIO_SET_POLLING, READ, sizeof(int)); + _(WSDISPLAYIO_GET_FBINFO, READWRITE, struct_wsdisplayio_fbinfo_sz); + _(WSDISPLAYIO_DOBLIT, READWRITE, struct_wsdisplayio_blit_sz); + _(WSDISPLAYIO_WAITBLIT, READWRITE, struct_wsdisplayio_blit_sz); + /* Entries from file: dev/biovar.h */ + _(BIOCLOCATE, READWRITE, struct_bio_locate_sz); + _(BIOCINQ, READWRITE, struct_bioc_inq_sz); + _(BIOCDISK_NOVOL, READWRITE, struct_bioc_disk_sz); + _(BIOCDISK, READWRITE, struct_bioc_disk_sz); + _(BIOCVOL, READWRITE, struct_bioc_vol_sz); + _(BIOCALARM, READWRITE, struct_bioc_alarm_sz); + _(BIOCBLINK, READWRITE, struct_bioc_blink_sz); + _(BIOCSETSTATE, READWRITE, struct_bioc_setstate_sz); + _(BIOCVOLOPS, READWRITE, struct_bioc_volops_sz); + /* Entries from file: dev/md.h */ + _(MD_GETCONF, WRITE, struct_md_conf_sz); + _(MD_SETCONF, READ, struct_md_conf_sz); + /* Entries from file: dev/ccdvar.h */ + _(CCDIOCSET, READWRITE, struct_ccd_ioctl_sz); + _(CCDIOCCLR, READ, struct_ccd_ioctl_sz); + /* Entries from file: dev/cgdvar.h */ + _(CGDIOCSET, READWRITE, struct_cgd_ioctl_sz); + _(CGDIOCCLR, READ, struct_cgd_ioctl_sz); + _(CGDIOCGET, READWRITE, struct_cgd_user_sz); + /* Entries from file: dev/fssvar.h */ + _(FSSIOCSET, READ, struct_fss_set_sz); + _(FSSIOCGET, WRITE, struct_fss_get_sz); + _(FSSIOCCLR, NONE, 0); + _(FSSIOFSET, READ, sizeof(int)); + _(FSSIOFGET, WRITE, sizeof(int)); + /* Entries from file: dev/bluetooth/btdev.h */ + _(BTDEV_ATTACH, READ, struct_plistref_sz); + _(BTDEV_DETACH, READ, struct_plistref_sz); + /* Entries from file: dev/bluetooth/btsco.h */ + _(BTSCO_GETINFO, WRITE, struct_btsco_info_sz); + /* Entries from file: dev/kttcpio.h */ + _(KTTCP_IO_SEND, READWRITE, struct_kttcp_io_args_sz); + _(KTTCP_IO_RECV, READWRITE, struct_kttcp_io_args_sz); + /* Entries from file: 
dev/lockstat.h */ + _(IOC_LOCKSTAT_GVERSION, WRITE, sizeof(int)); + _(IOC_LOCKSTAT_ENABLE, READ, struct_lsenable_sz); + _(IOC_LOCKSTAT_DISABLE, WRITE, struct_lsdisable_sz); + /* Entries from file: dev/vndvar.h */ + _(VNDIOCSET, READWRITE, struct_vnd_ioctl_sz); + _(VNDIOCCLR, READ, struct_vnd_ioctl_sz); + _(VNDIOCGET, READWRITE, struct_vnd_user_sz); + /* Entries from file: dev/spkrio.h */ + _(SPKRTONE, READ, struct_tone_sz); + _(SPKRTUNE, NONE, 0); + _(SPKRGETVOL, WRITE, sizeof(unsigned int)); + _(SPKRSETVOL, READ, sizeof(unsigned int)); + /* Entries from file: net/bpf.h */ + _(BIOCGBLEN, WRITE, sizeof(unsigned int)); + _(BIOCSBLEN, READWRITE, sizeof(unsigned int)); + _(BIOCSETF, READ, struct_bpf_program_sz); + _(BIOCFLUSH, NONE, 0); + _(BIOCPROMISC, NONE, 0); + _(BIOCGDLT, WRITE, sizeof(unsigned int)); + _(BIOCGETIF, WRITE, struct_ifreq_sz); + _(BIOCSETIF, READ, struct_ifreq_sz); + _(BIOCGSTATS, WRITE, struct_bpf_stat_sz); + _(BIOCGSTATSOLD, WRITE, struct_bpf_stat_old_sz); + _(BIOCIMMEDIATE, READ, sizeof(unsigned int)); + _(BIOCVERSION, WRITE, struct_bpf_version_sz); + _(BIOCSTCPF, READ, struct_bpf_program_sz); + _(BIOCSUDPF, READ, struct_bpf_program_sz); + _(BIOCGHDRCMPLT, WRITE, sizeof(unsigned int)); + _(BIOCSHDRCMPLT, READ, sizeof(unsigned int)); + _(BIOCSDLT, READ, sizeof(unsigned int)); + _(BIOCGDLTLIST, READWRITE, struct_bpf_dltlist_sz); + _(BIOCGSEESENT, WRITE, sizeof(unsigned int)); + _(BIOCSSEESENT, READ, sizeof(unsigned int)); + _(BIOCSRTIMEOUT, READ, struct_timeval_sz); + _(BIOCGRTIMEOUT, WRITE, struct_timeval_sz); + _(BIOCGFEEDBACK, WRITE, sizeof(unsigned int)); + _(BIOCSFEEDBACK, READ, sizeof(unsigned int)); + /* Entries from file: net/if_atm.h */ + _(SIOCRAWATM, READWRITE, sizeof(int)); + _(SIOCATMENA, READWRITE, struct_atm_pseudoioctl_sz); + _(SIOCATMDIS, READWRITE, struct_atm_pseudoioctl_sz); + _(SIOCSPVCTX, READWRITE, struct_pvctxreq_sz); + _(SIOCGPVCTX, READWRITE, struct_pvctxreq_sz); + _(SIOCSPVCSIF, READWRITE, struct_ifreq_sz); + _(SIOCGPVCSIF, 
READWRITE, struct_ifreq_sz); + /* Entries from file: net/if_gre.h */ + _(GRESADDRS, READ, struct_ifreq_sz); + _(GRESADDRD, READ, struct_ifreq_sz); + _(GREGADDRS, READWRITE, struct_ifreq_sz); + _(GREGADDRD, READWRITE, struct_ifreq_sz); + _(GRESPROTO, READ, struct_ifreq_sz); + _(GREGPROTO, READWRITE, struct_ifreq_sz); + _(GRESSOCK, READ, struct_ifreq_sz); + _(GREDSOCK, READ, struct_ifreq_sz); + /* Entries from file: net/if_ppp.h */ + _(PPPIOCGRAWIN, WRITE, struct_ppp_rawin_sz); + _(PPPIOCGFLAGS, WRITE, sizeof(int)); + _(PPPIOCSFLAGS, READ, sizeof(int)); + _(PPPIOCGASYNCMAP, WRITE, sizeof(int)); + _(PPPIOCSASYNCMAP, READ, sizeof(int)); + _(PPPIOCGUNIT, WRITE, sizeof(int)); + _(PPPIOCGRASYNCMAP, WRITE, sizeof(int)); + _(PPPIOCSRASYNCMAP, READ, sizeof(int)); + _(PPPIOCGMRU, WRITE, sizeof(int)); + _(PPPIOCSMRU, READ, sizeof(int)); + _(PPPIOCSMAXCID, READ, sizeof(int)); + _(PPPIOCGXASYNCMAP, WRITE, (8 * sizeof(u32))); + _(PPPIOCSXASYNCMAP, READ, (8 * sizeof(u32))); + _(PPPIOCXFERUNIT, NONE, 0); + _(PPPIOCSCOMPRESS, READ, struct_ppp_option_data_sz); + _(PPPIOCGNPMODE, READWRITE, struct_npioctl_sz); + _(PPPIOCSNPMODE, READ, struct_npioctl_sz); + _(PPPIOCGIDLE, WRITE, struct_ppp_idle_sz); + _(PPPIOCGMTU, WRITE, sizeof(int)); + _(PPPIOCSMTU, READ, sizeof(int)); + _(SIOCGPPPSTATS, READWRITE, struct_ifpppstatsreq_sz); + _(SIOCGPPPCSTATS, READWRITE, struct_ifpppcstatsreq_sz); + /* Entries from file: net/npf.h */ + _(IOC_NPF_VERSION, WRITE, sizeof(int)); + _(IOC_NPF_SWITCH, READ, sizeof(int)); + _(IOC_NPF_LOAD, READWRITE, struct_plistref_sz); + _(IOC_NPF_TABLE, READ, struct_npf_ioctl_table_sz); + _(IOC_NPF_STATS, READ, sizeof(uptr)); + _(IOC_NPF_SAVE, WRITE, struct_plistref_sz); + _(IOC_NPF_RULE, READWRITE, struct_plistref_sz); + _(IOC_NPF_CONN_LOOKUP, READWRITE, struct_plistref_sz); + /* Entries from file: net/if_pppoe.h */ + _(PPPOESETPARMS, READ, struct_pppoediscparms_sz); + _(PPPOEGETPARMS, READWRITE, struct_pppoediscparms_sz); + _(PPPOEGETSESSION, READWRITE, 
struct_pppoeconnectionstate_sz); + /* Entries from file: net/if_sppp.h */ + _(SPPPGETAUTHCFG, READWRITE, struct_spppauthcfg_sz); + _(SPPPSETAUTHCFG, READ, struct_spppauthcfg_sz); + _(SPPPGETLCPCFG, READWRITE, struct_sppplcpcfg_sz); + _(SPPPSETLCPCFG, READ, struct_sppplcpcfg_sz); + _(SPPPGETSTATUS, READWRITE, struct_spppstatus_sz); + _(SPPPGETSTATUSNCP, READWRITE, struct_spppstatusncp_sz); + _(SPPPGETIDLETO, READWRITE, struct_spppidletimeout_sz); + _(SPPPSETIDLETO, READ, struct_spppidletimeout_sz); + _(SPPPGETAUTHFAILURES, READWRITE, struct_spppauthfailurestats_sz); + _(SPPPSETAUTHFAILURE, READ, struct_spppauthfailuresettings_sz); + _(SPPPSETDNSOPTS, READ, struct_spppdnssettings_sz); + _(SPPPGETDNSOPTS, READWRITE, struct_spppdnssettings_sz); + _(SPPPGETDNSADDRS, READWRITE, struct_spppdnsaddrs_sz); + _(SPPPSETKEEPALIVE, READ, struct_spppkeepalivesettings_sz); + _(SPPPGETKEEPALIVE, READWRITE, struct_spppkeepalivesettings_sz); + /* Entries from file: net/if_srt.h */ + _(SRT_GETNRT, WRITE, sizeof(unsigned int)); + _(SRT_GETRT, READWRITE, struct_srt_rt_sz); + _(SRT_SETRT, READ, struct_srt_rt_sz); + _(SRT_DELRT, READ, sizeof(unsigned int)); + _(SRT_SFLAGS, READ, sizeof(unsigned int)); + _(SRT_GFLAGS, WRITE, sizeof(unsigned int)); + _(SRT_SGFLAGS, READWRITE, sizeof(unsigned int)); + _(SRT_DEBUG, READ, sizeof(uptr)); + /* Entries from file: net/if_tap.h */ + _(TAPGIFNAME, WRITE, struct_ifreq_sz); + /* Entries from file: net/if_tun.h */ + _(TUNSDEBUG, READ, sizeof(int)); + _(TUNGDEBUG, WRITE, sizeof(int)); + _(TUNSIFMODE, READ, sizeof(int)); + _(TUNSIFHEAD, READ, sizeof(int)); + _(TUNGIFHEAD, WRITE, sizeof(int)); + /* Entries from file: net/pfvar.h */ + _(DIOCSTART, NONE, 0); + _(DIOCSTOP, NONE, 0); + _(DIOCADDRULE, READWRITE, struct_pfioc_rule_sz); + _(DIOCGETRULES, READWRITE, struct_pfioc_rule_sz); + _(DIOCGETRULE, READWRITE, struct_pfioc_rule_sz); + _(DIOCSETLCK, READWRITE, sizeof(u32)); + _(DIOCCLRSTATES, READWRITE, struct_pfioc_state_kill_sz); + _(DIOCGETSTATE, 
READWRITE, struct_pfioc_state_sz); + _(DIOCSETSTATUSIF, READWRITE, struct_pfioc_if_sz); + _(DIOCGETSTATUS, READWRITE, struct_pf_status_sz); + _(DIOCCLRSTATUS, NONE, 0); + _(DIOCNATLOOK, READWRITE, struct_pfioc_natlook_sz); + _(DIOCSETDEBUG, READWRITE, sizeof(u32)); + _(DIOCGETSTATES, READWRITE, struct_pfioc_states_sz); + _(DIOCCHANGERULE, READWRITE, struct_pfioc_rule_sz); + _(DIOCSETTIMEOUT, READWRITE, struct_pfioc_tm_sz); + _(DIOCGETTIMEOUT, READWRITE, struct_pfioc_tm_sz); + _(DIOCADDSTATE, READWRITE, struct_pfioc_state_sz); + _(DIOCCLRRULECTRS, NONE, 0); + _(DIOCGETLIMIT, READWRITE, struct_pfioc_limit_sz); + _(DIOCSETLIMIT, READWRITE, struct_pfioc_limit_sz); + _(DIOCKILLSTATES, READWRITE, struct_pfioc_state_kill_sz); + _(DIOCSTARTALTQ, NONE, 0); + _(DIOCSTOPALTQ, NONE, 0); + _(DIOCADDALTQ, READWRITE, struct_pfioc_altq_sz); + _(DIOCGETALTQS, READWRITE, struct_pfioc_altq_sz); + _(DIOCGETALTQ, READWRITE, struct_pfioc_altq_sz); + _(DIOCCHANGEALTQ, READWRITE, struct_pfioc_altq_sz); + _(DIOCGETQSTATS, READWRITE, struct_pfioc_qstats_sz); + _(DIOCBEGINADDRS, READWRITE, struct_pfioc_pooladdr_sz); + _(DIOCADDADDR, READWRITE, struct_pfioc_pooladdr_sz); + _(DIOCGETADDRS, READWRITE, struct_pfioc_pooladdr_sz); + _(DIOCGETADDR, READWRITE, struct_pfioc_pooladdr_sz); + _(DIOCCHANGEADDR, READWRITE, struct_pfioc_pooladdr_sz); + _(DIOCADDSTATES, READWRITE, struct_pfioc_states_sz); + _(DIOCGETRULESETS, READWRITE, struct_pfioc_ruleset_sz); + _(DIOCGETRULESET, READWRITE, struct_pfioc_ruleset_sz); + _(DIOCRCLRTABLES, READWRITE, struct_pfioc_table_sz); + _(DIOCRADDTABLES, READWRITE, struct_pfioc_table_sz); + _(DIOCRDELTABLES, READWRITE, struct_pfioc_table_sz); + _(DIOCRGETTABLES, READWRITE, struct_pfioc_table_sz); + _(DIOCRGETTSTATS, READWRITE, struct_pfioc_table_sz); + _(DIOCRCLRTSTATS, READWRITE, struct_pfioc_table_sz); + _(DIOCRCLRADDRS, READWRITE, struct_pfioc_table_sz); + _(DIOCRADDADDRS, READWRITE, struct_pfioc_table_sz); + _(DIOCRDELADDRS, READWRITE, struct_pfioc_table_sz); + 
_(DIOCRSETADDRS, READWRITE, struct_pfioc_table_sz); + _(DIOCRGETADDRS, READWRITE, struct_pfioc_table_sz); + _(DIOCRGETASTATS, READWRITE, struct_pfioc_table_sz); + _(DIOCRCLRASTATS, READWRITE, struct_pfioc_table_sz); + _(DIOCRTSTADDRS, READWRITE, struct_pfioc_table_sz); + _(DIOCRSETTFLAGS, READWRITE, struct_pfioc_table_sz); + _(DIOCRINADEFINE, READWRITE, struct_pfioc_table_sz); + _(DIOCOSFPFLUSH, NONE, 0); + _(DIOCOSFPADD, READWRITE, struct_pf_osfp_ioctl_sz); + _(DIOCOSFPGET, READWRITE, struct_pf_osfp_ioctl_sz); + _(DIOCXBEGIN, READWRITE, struct_pfioc_trans_sz); + _(DIOCXCOMMIT, READWRITE, struct_pfioc_trans_sz); + _(DIOCXROLLBACK, READWRITE, struct_pfioc_trans_sz); + _(DIOCGETSRCNODES, READWRITE, struct_pfioc_src_nodes_sz); + _(DIOCCLRSRCNODES, NONE, 0); + _(DIOCSETHOSTID, READWRITE, sizeof(u32)); + _(DIOCIGETIFACES, READWRITE, struct_pfioc_iface_sz); + _(DIOCSETIFFLAG, READWRITE, struct_pfioc_iface_sz); + _(DIOCCLRIFFLAG, READWRITE, struct_pfioc_iface_sz); + _(DIOCKILLSRCNODES, READWRITE, struct_pfioc_src_node_kill_sz); + /* Entries from file: netbt/hci.h */ + _(SIOCGBTINFO, READWRITE, struct_btreq_sz); + _(SIOCGBTINFOA, READWRITE, struct_btreq_sz); + _(SIOCNBTINFO, READWRITE, struct_btreq_sz); + _(SIOCSBTFLAGS, READWRITE, struct_btreq_sz); + _(SIOCSBTPOLICY, READWRITE, struct_btreq_sz); + _(SIOCSBTPTYPE, READWRITE, struct_btreq_sz); + _(SIOCGBTSTATS, READWRITE, struct_btreq_sz); + _(SIOCZBTSTATS, READWRITE, struct_btreq_sz); + _(SIOCBTDUMP, READ, struct_btreq_sz); + _(SIOCSBTSCOMTU, READWRITE, struct_btreq_sz); + _(SIOCGBTFEAT, READWRITE, struct_btreq_sz); + /* Entries from file: netinet/ip_nat.h */ + _(SIOCADNAT, READ, struct_ipfobj_sz); + _(SIOCRMNAT, READ, struct_ipfobj_sz); + _(SIOCGNATS, READWRITE, struct_ipfobj_sz); + _(SIOCGNATL, READWRITE, struct_ipfobj_sz); + _(SIOCPURGENAT, READWRITE, struct_ipfobj_sz); + /* Entries from file: netinet6/in6_var.h */ + _(SIOCSIFINFO_FLAGS, READWRITE, struct_in6_ndireq_sz); + _(SIOCAADDRCTL_POLICY, READ, 
struct_in6_addrpolicy_sz); + _(SIOCDADDRCTL_POLICY, READ, struct_in6_addrpolicy_sz); + /* Entries from file: netsmb/smb_dev.h */ + _(SMBIOC_OPENSESSION, READ, struct_smbioc_ossn_sz); + _(SMBIOC_OPENSHARE, READ, struct_smbioc_oshare_sz); + _(SMBIOC_REQUEST, READWRITE, struct_smbioc_rq_sz); + _(SMBIOC_SETFLAGS, READ, struct_smbioc_flags_sz); + _(SMBIOC_LOOKUP, READ, struct_smbioc_lookup_sz); + _(SMBIOC_READ, READWRITE, struct_smbioc_rw_sz); + _(SMBIOC_WRITE, READWRITE, struct_smbioc_rw_sz); + /* Entries from file: sys/agpio.h */ + _(AGPIOC_INFO, WRITE, struct__agp_info_sz); + _(AGPIOC_ACQUIRE, NONE, 0); + _(AGPIOC_RELEASE, NONE, 0); + _(AGPIOC_SETUP, READ, struct__agp_setup_sz); + _(AGPIOC_ALLOCATE, READWRITE, struct__agp_allocate_sz); + _(AGPIOC_DEALLOCATE, READ, sizeof(int)); + _(AGPIOC_BIND, READ, struct__agp_bind_sz); + _(AGPIOC_UNBIND, READ, struct__agp_unbind_sz); + /* Entries from file: sys/audioio.h */ + _(AUDIO_GETINFO, WRITE, struct_audio_info_sz); + _(AUDIO_SETINFO, READWRITE, struct_audio_info_sz); + _(AUDIO_DRAIN, NONE, 0); + _(AUDIO_FLUSH, NONE, 0); + _(AUDIO_WSEEK, WRITE, sizeof(unsigned long)); + _(AUDIO_RERROR, WRITE, sizeof(int)); + _(AUDIO_GETDEV, WRITE, struct_audio_device_sz); + _(AUDIO_GETENC, READWRITE, struct_audio_encoding_sz); + _(AUDIO_GETFD, WRITE, sizeof(int)); + _(AUDIO_SETFD, READWRITE, sizeof(int)); + _(AUDIO_PERROR, WRITE, sizeof(int)); + _(AUDIO_GETIOFFS, WRITE, struct_audio_offset_sz); + _(AUDIO_GETOOFFS, WRITE, struct_audio_offset_sz); + _(AUDIO_GETPROPS, WRITE, sizeof(int)); + _(AUDIO_GETBUFINFO, WRITE, struct_audio_info_sz); + _(AUDIO_SETCHAN, READ, sizeof(int)); + _(AUDIO_GETCHAN, WRITE, sizeof(int)); + _(AUDIO_MIXER_READ, READWRITE, struct_mixer_ctrl_sz); + _(AUDIO_MIXER_WRITE, READWRITE, struct_mixer_ctrl_sz); + _(AUDIO_MIXER_DEVINFO, READWRITE, struct_mixer_devinfo_sz); + /* Entries from file: sys/ataio.h */ + _(ATAIOCCOMMAND, READWRITE, struct_atareq_sz); + _(ATABUSIOSCAN, READ, struct_atabusioscan_args_sz); + 
_(ATABUSIORESET, NONE, 0); + _(ATABUSIODETACH, READ, struct_atabusiodetach_args_sz); + /* Entries from file: sys/cdio.h */ + _(CDIOCPLAYTRACKS, READ, struct_ioc_play_track_sz); + _(CDIOCPLAYBLOCKS, READ, struct_ioc_play_blocks_sz); + _(CDIOCREADSUBCHANNEL, READWRITE, struct_ioc_read_subchannel_sz); + _(CDIOREADTOCHEADER, WRITE, struct_ioc_toc_header_sz); + _(CDIOREADTOCENTRIES, READWRITE, struct_ioc_read_toc_entry_sz); + _(CDIOREADMSADDR, READWRITE, sizeof(int)); + _(CDIOCSETPATCH, READ, struct_ioc_patch_sz); + _(CDIOCGETVOL, WRITE, struct_ioc_vol_sz); + _(CDIOCSETVOL, READ, struct_ioc_vol_sz); + _(CDIOCSETMONO, NONE, 0); + _(CDIOCSETSTEREO, NONE, 0); + _(CDIOCSETMUTE, NONE, 0); + _(CDIOCSETLEFT, NONE, 0); + _(CDIOCSETRIGHT, NONE, 0); + _(CDIOCSETDEBUG, NONE, 0); + _(CDIOCCLRDEBUG, NONE, 0); + _(CDIOCPAUSE, NONE, 0); + _(CDIOCRESUME, NONE, 0); + _(CDIOCRESET, NONE, 0); + _(CDIOCSTART, NONE, 0); + _(CDIOCSTOP, NONE, 0); + _(CDIOCEJECT, NONE, 0); + _(CDIOCALLOW, NONE, 0); + _(CDIOCPREVENT, NONE, 0); + _(CDIOCCLOSE, NONE, 0); + _(CDIOCPLAYMSF, READ, struct_ioc_play_msf_sz); + _(CDIOCLOADUNLOAD, READ, struct_ioc_load_unload_sz); + /* Entries from file: sys/chio.h */ + _(CHIOMOVE, READ, struct_changer_move_request_sz); + _(CHIOEXCHANGE, READ, struct_changer_exchange_request_sz); + _(CHIOPOSITION, READ, struct_changer_position_request_sz); + _(CHIOSPICKER, READ, sizeof(int)); + _(CHIOGPARAMS, WRITE, struct_changer_params_sz); + _(CHIOIELEM, NONE, 0); + _(OCHIOGSTATUS, READ, struct_ochanger_element_status_request_sz); + _(CHIOGSTATUS, READ, struct_changer_element_status_request_sz); + _(CHIOSVOLTAG, READ, struct_changer_set_voltag_request_sz); + /* Entries from file: sys/clockctl.h */ + _(CLOCKCTL_SETTIMEOFDAY, READ, struct_clockctl_settimeofday_sz); + _(CLOCKCTL_ADJTIME, READWRITE, struct_clockctl_adjtime_sz); + _(CLOCKCTL_CLOCK_SETTIME, READ, struct_clockctl_clock_settime_sz); + _(CLOCKCTL_NTP_ADJTIME, READWRITE, struct_clockctl_ntp_adjtime_sz); + /* Entries from file: 
sys/cpuio.h */ + _(IOC_CPU_SETSTATE, READ, struct_cpustate_sz); + _(IOC_CPU_GETSTATE, READWRITE, struct_cpustate_sz); + _(IOC_CPU_GETCOUNT, WRITE, sizeof(int)); + _(IOC_CPU_MAPID, READWRITE, sizeof(int)); + _(IOC_CPU_UCODE_GET_VERSION, READWRITE, struct_cpu_ucode_version_sz); + _(IOC_CPU_UCODE_APPLY, READ, struct_cpu_ucode_sz); + /* Entries from file: sys/dkio.h */ + _(DIOCGDINFO, WRITE, struct_disklabel_sz); + _(DIOCSDINFO, READ, struct_disklabel_sz); + _(DIOCWDINFO, READ, 0); + _(DIOCRFORMAT, READWRITE, struct_format_op_sz); + _(DIOCWFORMAT, READWRITE, struct_format_op_sz); + _(DIOCSSTEP, READ, sizeof(int)); + _(DIOCSRETRIES, READ, sizeof(int)); + _(DIOCKLABEL, READ, sizeof(int)); + _(DIOCWLABEL, READ, sizeof(int)); + _(DIOCSBAD, READ, struct_dkbad_sz); + _(DIOCEJECT, READ, sizeof(int)); + _(ODIOCEJECT, NONE, 0); + _(DIOCLOCK, READ, sizeof(int)); + _(DIOCGDEFLABEL, WRITE, struct_disklabel_sz); + _(DIOCCLRLABEL, NONE, 0); + _(DIOCGCACHE, WRITE, sizeof(int)); + _(DIOCSCACHE, READ, sizeof(int)); + _(DIOCCACHESYNC, READ, sizeof(int)); + _(DIOCBSLIST, READWRITE, struct_disk_badsecinfo_sz); + _(DIOCBSFLUSH, NONE, 0); + _(DIOCAWEDGE, READWRITE, struct_dkwedge_info_sz); + _(DIOCGWEDGEINFO, WRITE, struct_dkwedge_info_sz); + _(DIOCDWEDGE, READ, struct_dkwedge_info_sz); + _(DIOCLWEDGES, READWRITE, struct_dkwedge_list_sz); + _(DIOCGSTRATEGY, WRITE, struct_disk_strategy_sz); + _(DIOCSSTRATEGY, READ, struct_disk_strategy_sz); + _(DIOCGDISKINFO, WRITE, struct_plistref_sz); + _(DIOCTUR, WRITE, sizeof(int)); + _(DIOCMWEDGES, WRITE, sizeof(int)); + _(DIOCGSECTORSIZE, WRITE, sizeof(unsigned int)); + _(DIOCGMEDIASIZE, WRITE, sizeof(uptr)); + /* Entries from file: sys/drvctlio.h */ + _(DRVDETACHDEV, READ, struct_devdetachargs_sz); + _(DRVRESCANBUS, READ, struct_devrescanargs_sz); + _(DRVCTLCOMMAND, READWRITE, struct_plistref_sz); + _(DRVRESUMEDEV, READ, struct_devpmargs_sz); + _(DRVLISTDEV, READWRITE, struct_devlistargs_sz); + _(DRVGETEVENT, WRITE, struct_plistref_sz); + 
_(DRVSUSPENDDEV, READ, struct_devpmargs_sz); + /* Entries from file: sys/dvdio.h */ + _(DVD_READ_STRUCT, READWRITE, union_dvd_struct_sz); + _(DVD_WRITE_STRUCT, READWRITE, union_dvd_struct_sz); + _(DVD_AUTH, READWRITE, union_dvd_authinfo_sz); + /* Entries from file: sys/envsys.h */ + _(ENVSYS_GETDICTIONARY, READWRITE, struct_plistref_sz); + _(ENVSYS_SETDICTIONARY, READWRITE, struct_plistref_sz); + _(ENVSYS_REMOVEPROPS, READWRITE, struct_plistref_sz); + _(ENVSYS_GTREDATA, READWRITE, struct_envsys_tre_data_sz); + _(ENVSYS_GTREINFO, READWRITE, struct_envsys_basic_info_sz); + /* Entries from file: sys/event.h */ + _(KFILTER_BYFILTER, READWRITE, struct_kfilter_mapping_sz); + _(KFILTER_BYNAME, READWRITE, struct_kfilter_mapping_sz); + /* Entries from file: sys/fdio.h */ + _(FDIOCGETOPTS, WRITE, 0); + _(FDIOCSETOPTS, READ, sizeof(int)); + _(FDIOCSETFORMAT, READ, struct_fdformat_parms_sz); + _(FDIOCGETFORMAT, WRITE, struct_fdformat_parms_sz); + _(FDIOCFORMAT_TRACK, READ, struct_fdformat_cmd_sz); + /* Entries from file: sys/filio.h */ + _(FIOCLEX, NONE, 0); + _(FIONCLEX, NONE, 0); + _(FIONREAD, WRITE, sizeof(int)); + _(FIONBIO, READ, sizeof(int)); + _(FIOASYNC, READ, sizeof(int)); + _(FIOSETOWN, READ, sizeof(int)); + _(FIOGETOWN, WRITE, sizeof(int)); + _(OFIOGETBMAP, READWRITE, sizeof(u32)); + _(FIOGETBMAP, READWRITE, sizeof(u64)); + _(FIONWRITE, WRITE, sizeof(int)); + _(FIONSPACE, WRITE, sizeof(int)); + /* Entries from file: sys/gpio.h */ + _(GPIOINFO, WRITE, struct_gpio_info_sz); + _(GPIOSET, READWRITE, struct_gpio_set_sz); + _(GPIOUNSET, READWRITE, struct_gpio_set_sz); + _(GPIOREAD, READWRITE, struct_gpio_req_sz); + _(GPIOWRITE, READWRITE, struct_gpio_req_sz); + _(GPIOTOGGLE, READWRITE, struct_gpio_req_sz); + _(GPIOATTACH, READWRITE, struct_gpio_attach_sz); + /* Entries from file: sys/ioctl.h */ + _(PTIOCNETBSD, READ, struct_ioctl_pt_sz); + _(PTIOCSUNOS, READ, struct_ioctl_pt_sz); + _(PTIOCLINUX, READ, struct_ioctl_pt_sz); + _(PTIOCFREEBSD, READ, struct_ioctl_pt_sz); + 
_(PTIOCULTRIX, READ, struct_ioctl_pt_sz); + /* Entries from file: sys/ioctl_compat.h */ + _(TIOCHPCL, NONE, 0); + _(TIOCGETP, WRITE, struct_sgttyb_sz); + _(TIOCSETP, READ, struct_sgttyb_sz); + _(TIOCSETN, READ, 0); + _(TIOCSETC, READ, struct_tchars_sz); + _(TIOCGETC, WRITE, struct_tchars_sz); + _(TIOCLBIS, READ, sizeof(int)); + _(TIOCLBIC, READ, sizeof(int)); + _(TIOCLSET, READ, sizeof(int)); + _(TIOCLGET, WRITE, sizeof(int)); + _(TIOCSLTC, READ, struct_ltchars_sz); + _(TIOCGLTC, WRITE, struct_ltchars_sz); + _(OTIOCCONS, NONE, 0); + /* Entries from file: sys/joystick.h */ + _(JOY_SETTIMEOUT, READ, sizeof(int)); + _(JOY_GETTIMEOUT, WRITE, sizeof(int)); + _(JOY_SET_X_OFFSET, READ, sizeof(int)); + _(JOY_SET_Y_OFFSET, READ, sizeof(int)); + _(JOY_GET_Y_OFFSET, WRITE, sizeof(int)); + /* Entries from file: sys/ksyms.h */ + _(OKIOCGSYMBOL, READ, struct_ksyms_ogsymbol_sz); + _(OKIOCGVALUE, READ, struct_ksyms_ogsymbol_sz); + _(KIOCGSIZE, WRITE, sizeof(int)); + _(KIOCGVALUE, READWRITE, struct_ksyms_gvalue_sz); + _(KIOCGSYMBOL, READWRITE, struct_ksyms_gsymbol_sz); + /* Entries from file: sys/lua.h */ + _(LUAINFO, READWRITE, struct_lua_info_sz); + _(LUACREATE, READWRITE, struct_lua_create_sz); + _(LUADESTROY, READWRITE, struct_lua_create_sz); + _(LUAREQUIRE, READWRITE, struct_lua_require_sz); + _(LUALOAD, READWRITE, struct_lua_load_sz); + /* Entries from file: sys/midiio.h */ + _(MIDI_PRETIME, READWRITE, sizeof(int)); + _(MIDI_MPUMODE, READWRITE, sizeof(int)); + _(MIDI_MPUCMD, READWRITE, struct_mpu_command_rec_sz); + _(SEQUENCER_RESET, NONE, 0); + _(SEQUENCER_SYNC, NONE, 0); + _(SEQUENCER_INFO, READWRITE, struct_synth_info_sz); + _(SEQUENCER_CTRLRATE, READWRITE, sizeof(int)); + _(SEQUENCER_GETOUTCOUNT, WRITE, sizeof(int)); + _(SEQUENCER_GETINCOUNT, WRITE, sizeof(int)); + _(SEQUENCER_RESETSAMPLES, READ, sizeof(int)); + _(SEQUENCER_NRSYNTHS, WRITE, sizeof(int)); + _(SEQUENCER_NRMIDIS, WRITE, sizeof(int)); + _(SEQUENCER_THRESHOLD, READ, sizeof(int)); + _(SEQUENCER_MEMAVL, 
READWRITE, sizeof(int)); + _(SEQUENCER_PANIC, NONE, 0); + _(SEQUENCER_OUTOFBAND, READ, struct_seq_event_rec_sz); + _(SEQUENCER_GETTIME, WRITE, sizeof(int)); + _(SEQUENCER_TMR_TIMEBASE, READWRITE, sizeof(int)); + _(SEQUENCER_TMR_START, NONE, 0); + _(SEQUENCER_TMR_STOP, NONE, 0); + _(SEQUENCER_TMR_CONTINUE, NONE, 0); + _(SEQUENCER_TMR_TEMPO, READWRITE, sizeof(int)); + _(SEQUENCER_TMR_SOURCE, READWRITE, sizeof(int)); + _(SEQUENCER_TMR_METRONOME, READ, sizeof(int)); + _(SEQUENCER_TMR_SELECT, READ, sizeof(int)); + /* Entries from file: sys/mtio.h */ + _(MTIOCTOP, READ, struct_mtop_sz); + _(MTIOCGET, WRITE, struct_mtget_sz); + _(MTIOCIEOT, NONE, 0); + _(MTIOCEEOT, NONE, 0); + _(MTIOCRDSPOS, WRITE, sizeof(u32)); + _(MTIOCRDHPOS, WRITE, sizeof(u32)); + _(MTIOCSLOCATE, READ, sizeof(u32)); + _(MTIOCHLOCATE, READ, sizeof(u32)); + /* Entries from file: sys/power.h */ + _(POWER_EVENT_RECVDICT, READWRITE, struct_plistref_sz); + _(POWER_IOC_GET_TYPE, WRITE, struct_power_type_sz); + _(POWER_IOC_GET_TYPE_WITH_LOSSAGE, WRITE, sizeof(uptr)); + /* Entries from file: sys/radioio.h */ + _(RIOCGINFO, WRITE, struct_radio_info_sz); + _(RIOCSINFO, READWRITE, struct_radio_info_sz); + _(RIOCSSRCH, READ, sizeof(int)); + /* Entries from file: sys/rndio.h */ + _(RNDGETENTCNT, WRITE, sizeof(u32)); + _(RNDGETSRCNUM, READWRITE, struct_rndstat_sz); + _(RNDGETSRCNAME, READWRITE, struct_rndstat_name_sz); + _(RNDCTL, READ, struct_rndctl_sz); + _(RNDADDDATA, READ, struct_rnddata_sz); + _(RNDGETPOOLSTAT, WRITE, struct_rndpoolstat_sz); + _(RNDGETESTNUM, READWRITE, struct_rndstat_est_sz); + _(RNDGETESTNAME, READWRITE, struct_rndstat_est_name_sz); + /* Entries from file: sys/scanio.h */ + _(SCIOCGET, WRITE, struct_scan_io_sz); + _(SCIOCSET, READ, struct_scan_io_sz); + _(SCIOCRESTART, NONE, 0); + /* Entries from file: sys/scsiio.h */ + _(SCIOCCOMMAND, READWRITE, struct_scsireq_sz); + _(SCIOCDEBUG, READ, sizeof(int)); + _(SCIOCIDENTIFY, WRITE, struct_scsi_addr_sz); + _(OSCIOCIDENTIFY, WRITE, 
struct_oscsi_addr_sz); + _(SCIOCDECONFIG, NONE, 0); + _(SCIOCRECONFIG, NONE, 0); + _(SCIOCRESET, NONE, 0); + _(SCBUSIOSCAN, READ, struct_scbusioscan_args_sz); + _(SCBUSIORESET, NONE, 0); + _(SCBUSIODETACH, READ, struct_scbusiodetach_args_sz); + _(SCBUSACCEL, READ, struct_scbusaccel_args_sz); + /* Entries from file: sys/sockio.h */ + _(SIOCSHIWAT, READ, sizeof(int)); + _(SIOCGHIWAT, WRITE, sizeof(int)); + _(SIOCSLOWAT, READ, sizeof(int)); + _(SIOCGLOWAT, WRITE, sizeof(int)); + _(SIOCATMARK, WRITE, sizeof(int)); + _(SIOCSPGRP, READ, sizeof(int)); + _(SIOCGPGRP, WRITE, sizeof(int)); + _(SIOCADDRT, READ, struct_ortentry_sz); + _(SIOCDELRT, READ, struct_ortentry_sz); + _(SIOCSIFADDR, READ, struct_ifreq_sz); + _(SIOCGIFADDR, READWRITE, struct_ifreq_sz); + _(SIOCSIFDSTADDR, READ, struct_ifreq_sz); + _(SIOCGIFDSTADDR, READWRITE, struct_ifreq_sz); + _(SIOCSIFFLAGS, READ, struct_ifreq_sz); + _(SIOCGIFFLAGS, READWRITE, struct_ifreq_sz); + _(SIOCGIFBRDADDR, READWRITE, struct_ifreq_sz); + _(SIOCSIFBRDADDR, READ, struct_ifreq_sz); + _(SIOCGIFCONF, READWRITE, struct_ifconf_sz); + _(SIOCGIFNETMASK, READWRITE, struct_ifreq_sz); + _(SIOCSIFNETMASK, READ, struct_ifreq_sz); + _(SIOCGIFMETRIC, READWRITE, struct_ifreq_sz); + _(SIOCSIFMETRIC, READ, struct_ifreq_sz); + _(SIOCDIFADDR, READ, struct_ifreq_sz); + _(SIOCAIFADDR, READ, struct_ifaliasreq_sz); + _(SIOCGIFALIAS, READWRITE, struct_ifaliasreq_sz); + _(SIOCGIFAFLAG_IN, READWRITE, struct_ifreq_sz); + _(SIOCALIFADDR, READ, struct_if_laddrreq_sz); + _(SIOCGLIFADDR, READWRITE, struct_if_laddrreq_sz); + _(SIOCDLIFADDR, READ, struct_if_laddrreq_sz); + _(SIOCSIFADDRPREF, READ, struct_if_addrprefreq_sz); + _(SIOCGIFADDRPREF, READWRITE, struct_if_addrprefreq_sz); + _(SIOCADDMULTI, READ, struct_ifreq_sz); + _(SIOCDELMULTI, READ, struct_ifreq_sz); + _(SIOCGETVIFCNT, READWRITE, struct_sioc_vif_req_sz); + _(SIOCGETSGCNT, READWRITE, struct_sioc_sg_req_sz); + _(SIOCSIFMEDIA, READWRITE, struct_ifreq_sz); + _(SIOCGIFMEDIA, READWRITE, 
struct_ifmediareq_sz); + _(SIOCSIFGENERIC, READ, struct_ifreq_sz); + _(SIOCGIFGENERIC, READWRITE, struct_ifreq_sz); + _(SIOCSIFPHYADDR, READ, struct_ifaliasreq_sz); + _(SIOCGIFPSRCADDR, READWRITE, struct_ifreq_sz); + _(SIOCGIFPDSTADDR, READWRITE, struct_ifreq_sz); + _(SIOCDIFPHYADDR, READ, struct_ifreq_sz); + _(SIOCSLIFPHYADDR, READ, struct_if_laddrreq_sz); + _(SIOCGLIFPHYADDR, READWRITE, struct_if_laddrreq_sz); + _(SIOCSIFMTU, READ, struct_ifreq_sz); + _(SIOCGIFMTU, READWRITE, struct_ifreq_sz); + _(SIOCSDRVSPEC, READ, struct_ifdrv_sz); + _(SIOCGDRVSPEC, READWRITE, struct_ifdrv_sz); + _(SIOCIFCREATE, READ, struct_ifreq_sz); + _(SIOCIFDESTROY, READ, struct_ifreq_sz); + _(SIOCIFGCLONERS, READWRITE, struct_if_clonereq_sz); + _(SIOCGIFDLT, READWRITE, struct_ifreq_sz); + _(SIOCGIFCAP, READWRITE, struct_ifcapreq_sz); + _(SIOCSIFCAP, READ, struct_ifcapreq_sz); + _(SIOCSVH, READWRITE, struct_ifreq_sz); + _(SIOCGVH, READWRITE, struct_ifreq_sz); + _(SIOCINITIFADDR, READWRITE, struct_ifaddr_sz); + _(SIOCGIFDATA, READWRITE, struct_ifdatareq_sz); + _(SIOCZIFDATA, READWRITE, struct_ifdatareq_sz); + _(SIOCGLINKSTR, READWRITE, struct_ifdrv_sz); + _(SIOCSLINKSTR, READ, struct_ifdrv_sz); + _(SIOCGETHERCAP, READWRITE, struct_eccapreq_sz); + _(SIOCGIFINDEX, READWRITE, struct_ifreq_sz); + _(SIOCSETPFSYNC, READ, struct_ifreq_sz); + _(SIOCGETPFSYNC, READWRITE, struct_ifreq_sz); + /* Entries from file: sys/timepps.h */ + _(PPS_IOC_CREATE, NONE, 0); + _(PPS_IOC_DESTROY, NONE, 0); + _(PPS_IOC_SETPARAMS, READ, struct_pps_params_sz); + _(PPS_IOC_GETPARAMS, WRITE, struct_pps_params_sz); + _(PPS_IOC_GETCAP, WRITE, sizeof(int)); + _(PPS_IOC_FETCH, READWRITE, struct_pps_info_sz); + _(PPS_IOC_KCBIND, READ, sizeof(int)); + /* Entries from file: sys/ttycom.h */ + _(TIOCEXCL, NONE, 0); + _(TIOCNXCL, NONE, 0); + _(TIOCFLUSH, READ, sizeof(int)); + _(TIOCGETA, WRITE, struct_termios_sz); + _(TIOCSETA, READ, struct_termios_sz); + _(TIOCSETAW, READ, 0); + _(TIOCSETAF, READ, 0); + _(TIOCGETD, WRITE, 
sizeof(int)); + _(TIOCSETD, READ, sizeof(int)); + _(TIOCGLINED, WRITE, (32 * sizeof(char))); + _(TIOCSLINED, READ, (32 * sizeof(char))); + _(TIOCSBRK, NONE, 0); + _(TIOCCBRK, NONE, 0); + _(TIOCSDTR, NONE, 0); + _(TIOCCDTR, NONE, 0); + _(TIOCGPGRP, WRITE, sizeof(int)); + _(TIOCSPGRP, READ, sizeof(int)); + _(TIOCOUTQ, WRITE, sizeof(int)); + _(TIOCSTI, READ, sizeof(char)); + _(TIOCNOTTY, NONE, 0); + _(TIOCPKT, READ, sizeof(int)); + _(TIOCSTOP, NONE, 0); + _(TIOCSTART, NONE, 0); + _(TIOCMSET, READ, sizeof(int)); + _(TIOCMBIS, READ, sizeof(int)); + _(TIOCMBIC, READ, sizeof(int)); + _(TIOCMGET, WRITE, sizeof(int)); + _(TIOCREMOTE, READ, sizeof(int)); + _(TIOCGWINSZ, WRITE, struct_winsize_sz); + _(TIOCSWINSZ, READ, struct_winsize_sz); + _(TIOCUCNTL, READ, sizeof(int)); + _(TIOCSTAT, READ, sizeof(int)); + _(TIOCGSID, WRITE, sizeof(int)); + _(TIOCCONS, READ, sizeof(int)); + _(TIOCSCTTY, NONE, 0); + _(TIOCEXT, READ, sizeof(int)); + _(TIOCSIG, NONE, 0); + _(TIOCDRAIN, NONE, 0); + _(TIOCGFLAGS, WRITE, sizeof(int)); + _(TIOCSFLAGS, READ, sizeof(int)); + _(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz); + _(TIOCRCVFRAME, READ, sizeof(uptr)); + _(TIOCXMTFRAME, READ, sizeof(uptr)); + _(TIOCPTMGET, WRITE, struct_ptmget_sz); + _(TIOCGRANTPT, NONE, 0); + _(TIOCPTSNAME, WRITE, struct_ptmget_sz); + _(TIOCSQSIZE, READ, sizeof(int)); + _(TIOCGQSIZE, WRITE, sizeof(int)); + /* Entries from file: sys/verified_exec.h */ + _(VERIEXEC_LOAD, READ, struct_plistref_sz); + _(VERIEXEC_TABLESIZE, READ, struct_plistref_sz); + _(VERIEXEC_DELETE, READ, struct_plistref_sz); + _(VERIEXEC_QUERY, READWRITE, struct_plistref_sz); + _(VERIEXEC_DUMP, WRITE, struct_plistref_sz); + _(VERIEXEC_FLUSH, NONE, 0); + /* Entries from file: sys/videoio.h */ + _(VIDIOC_QUERYCAP, WRITE, struct_v4l2_capability_sz); + _(VIDIOC_RESERVED, NONE, 0); + _(VIDIOC_ENUM_FMT, READWRITE, struct_v4l2_fmtdesc_sz); + _(VIDIOC_G_FMT, READWRITE, struct_v4l2_format_sz); + _(VIDIOC_S_FMT, READWRITE, struct_v4l2_format_sz); + _(VIDIOC_REQBUFS, 
READWRITE, struct_v4l2_requestbuffers_sz); + _(VIDIOC_QUERYBUF, READWRITE, struct_v4l2_buffer_sz); + _(VIDIOC_G_FBUF, WRITE, struct_v4l2_framebuffer_sz); + _(VIDIOC_S_FBUF, READ, struct_v4l2_framebuffer_sz); + _(VIDIOC_OVERLAY, READ, sizeof(int)); + _(VIDIOC_QBUF, READWRITE, struct_v4l2_buffer_sz); + _(VIDIOC_DQBUF, READWRITE, struct_v4l2_buffer_sz); + _(VIDIOC_STREAMON, READ, sizeof(int)); + _(VIDIOC_STREAMOFF, READ, sizeof(int)); + _(VIDIOC_G_PARM, READWRITE, struct_v4l2_streamparm_sz); + _(VIDIOC_S_PARM, READWRITE, struct_v4l2_streamparm_sz); + _(VIDIOC_G_STD, WRITE, sizeof(u64)); + _(VIDIOC_S_STD, READ, sizeof(u64)); + _(VIDIOC_ENUMSTD, READWRITE, struct_v4l2_standard_sz); + _(VIDIOC_ENUMINPUT, READWRITE, struct_v4l2_input_sz); + _(VIDIOC_G_CTRL, READWRITE, struct_v4l2_control_sz); + _(VIDIOC_S_CTRL, READWRITE, struct_v4l2_control_sz); + _(VIDIOC_G_TUNER, READWRITE, struct_v4l2_tuner_sz); + _(VIDIOC_S_TUNER, READ, struct_v4l2_tuner_sz); + _(VIDIOC_G_AUDIO, WRITE, struct_v4l2_audio_sz); + _(VIDIOC_S_AUDIO, READ, struct_v4l2_audio_sz); + _(VIDIOC_QUERYCTRL, READWRITE, struct_v4l2_queryctrl_sz); + _(VIDIOC_QUERYMENU, READWRITE, struct_v4l2_querymenu_sz); + _(VIDIOC_G_INPUT, WRITE, sizeof(int)); + _(VIDIOC_S_INPUT, READWRITE, sizeof(int)); + _(VIDIOC_G_OUTPUT, WRITE, sizeof(int)); + _(VIDIOC_S_OUTPUT, READWRITE, sizeof(int)); + _(VIDIOC_ENUMOUTPUT, READWRITE, struct_v4l2_output_sz); + _(VIDIOC_G_AUDOUT, WRITE, struct_v4l2_audioout_sz); + _(VIDIOC_S_AUDOUT, READ, struct_v4l2_audioout_sz); + _(VIDIOC_G_MODULATOR, READWRITE, struct_v4l2_modulator_sz); + _(VIDIOC_S_MODULATOR, READ, struct_v4l2_modulator_sz); + _(VIDIOC_G_FREQUENCY, READWRITE, struct_v4l2_frequency_sz); + _(VIDIOC_S_FREQUENCY, READ, struct_v4l2_frequency_sz); + _(VIDIOC_CROPCAP, READWRITE, struct_v4l2_cropcap_sz); + _(VIDIOC_G_CROP, READWRITE, struct_v4l2_crop_sz); + _(VIDIOC_S_CROP, READ, struct_v4l2_crop_sz); + _(VIDIOC_G_JPEGCOMP, WRITE, struct_v4l2_jpegcompression_sz); + _(VIDIOC_S_JPEGCOMP, READ, 
struct_v4l2_jpegcompression_sz); + _(VIDIOC_QUERYSTD, WRITE, sizeof(u64)); + _(VIDIOC_TRY_FMT, READWRITE, struct_v4l2_format_sz); + _(VIDIOC_ENUMAUDIO, READWRITE, struct_v4l2_audio_sz); + _(VIDIOC_ENUMAUDOUT, READWRITE, struct_v4l2_audioout_sz); + _(VIDIOC_G_PRIORITY, WRITE, enum_v4l2_priority_sz); + _(VIDIOC_S_PRIORITY, READ, enum_v4l2_priority_sz); + _(VIDIOC_ENUM_FRAMESIZES, READWRITE, struct_v4l2_frmsizeenum_sz); + _(VIDIOC_ENUM_FRAMEINTERVALS, READWRITE, struct_v4l2_frmivalenum_sz); + /* Entries from file: sys/wdog.h */ + _(WDOGIOC_GMODE, READWRITE, struct_wdog_mode_sz); + _(WDOGIOC_SMODE, READ, struct_wdog_mode_sz); + _(WDOGIOC_WHICH, WRITE, struct_wdog_mode_sz); + _(WDOGIOC_TICKLE, NONE, 0); + _(WDOGIOC_GTICKLER, WRITE, sizeof(int)); + _(WDOGIOC_GWDOGS, READWRITE, struct_wdog_conf_sz); + /* Entries from file: soundcard.h */ + _(SNDCTL_DSP_RESET, NONE, 0); + _(SNDCTL_DSP_SYNC, NONE, 0); + _(SNDCTL_DSP_SPEED, READWRITE, sizeof(int)); + _(SOUND_PCM_READ_RATE, WRITE, sizeof(int)); + _(SNDCTL_DSP_STEREO, READWRITE, sizeof(int)); + _(SNDCTL_DSP_GETBLKSIZE, READWRITE, sizeof(int)); + _(SNDCTL_DSP_SETFMT, READWRITE, sizeof(int)); + _(SOUND_PCM_READ_BITS, WRITE, sizeof(int)); + _(SNDCTL_DSP_CHANNELS, READWRITE, sizeof(int)); + _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int)); + _(SOUND_PCM_WRITE_FILTER, READWRITE, sizeof(int)); + _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int)); + _(SNDCTL_DSP_POST, NONE, 0); + _(SNDCTL_DSP_SUBDIVIDE, READWRITE, sizeof(int)); + _(SNDCTL_DSP_SETFRAGMENT, READWRITE, sizeof(int)); + _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int)); + _(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz); + _(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz); + _(SNDCTL_DSP_NONBLOCK, NONE, 0); + _(SNDCTL_DSP_GETCAPS, WRITE, sizeof(int)); + _(SNDCTL_DSP_GETTRIGGER, WRITE, sizeof(int)); + _(SNDCTL_DSP_SETTRIGGER, READ, sizeof(int)); + _(SNDCTL_DSP_GETIPTR, WRITE, struct_count_info_sz); + _(SNDCTL_DSP_GETOPTR, WRITE, struct_count_info_sz); + 
_(SNDCTL_DSP_MAPINBUF, WRITE, struct_buffmem_desc_sz); + _(SNDCTL_DSP_MAPOUTBUF, WRITE, struct_buffmem_desc_sz); + _(SNDCTL_DSP_SETSYNCRO, NONE, 0); + _(SNDCTL_DSP_SETDUPLEX, NONE, 0); + _(SNDCTL_DSP_PROFILE, READ, sizeof(int)); + _(SNDCTL_DSP_GETODELAY, WRITE, sizeof(int)); + _(SOUND_MIXER_INFO, WRITE, struct_mixer_info_sz); + _(SOUND_OLD_MIXER_INFO, WRITE, struct__old_mixer_info_sz); + _(OSS_GETVERSION, WRITE, sizeof(int)); + _(SNDCTL_SYSINFO, WRITE, struct_oss_sysinfo_sz); + _(SNDCTL_AUDIOINFO, READWRITE, struct_oss_audioinfo_sz); + _(SNDCTL_ENGINEINFO, READWRITE, struct_oss_audioinfo_sz); + _(SNDCTL_DSP_GETPLAYVOL, WRITE, sizeof(unsigned int)); + _(SNDCTL_DSP_SETPLAYVOL, READ, sizeof(unsigned int)); + _(SNDCTL_DSP_GETRECVOL, WRITE, sizeof(unsigned int)); + _(SNDCTL_DSP_SETRECVOL, READ, sizeof(unsigned int)); + _(SNDCTL_DSP_SKIP, NONE, 0); + _(SNDCTL_DSP_SILENCE, NONE, 0); +#undef _ +} + +static bool ioctl_initialized = false; + +struct ioctl_desc_compare { + bool operator()(const ioctl_desc &left, const ioctl_desc &right) const { + return left.req < right.req; + } +}; + +static void ioctl_init() { + ioctl_table_fill(); + Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare()); + + bool bad = false; + for (unsigned i = 0; i < ioctl_table_size - 1; ++i) { + if (ioctl_table[i].req >= ioctl_table[i + 1].req) { + Printf("Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\n", + ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name, + ioctl_table[i + 1].name); + bad = true; + } + } + + if (bad) + Die(); + + ioctl_initialized = true; +} + +static const ioctl_desc *ioctl_table_lookup(unsigned req) { + int left = 0; + int right = ioctl_table_size; + while (left < right) { + int mid = (left + right) / 2; + if (ioctl_table[mid].req < req) + left = mid + 1; + else + right = mid; + } + if (left == right && ioctl_table[left].req == req) + return ioctl_table + left; + else + return nullptr; +} + +static bool ioctl_decode(unsigned req, ioctl_desc *desc) { + 
CHECK(desc); + desc->req = req; + desc->name = "<DECODED_IOCTL>"; + desc->size = IOC_SIZE(req); + // Sanity check. + if (desc->size > 0xFFFF) + return false; + unsigned dir = IOC_DIR(req); + switch (dir) { + case IOC_NONE: + desc->type = ioctl_desc::NONE; + break; + case IOC_READ | IOC_WRITE: + desc->type = ioctl_desc::READWRITE; + break; + case IOC_READ: + desc->type = ioctl_desc::WRITE; + break; + case IOC_WRITE: + desc->type = ioctl_desc::READ; + break; + default: + return false; + } + // Size can be 0 iff type is NONE. + if ((desc->type == IOC_NONE) != (desc->size == 0)) + return false; + // Sanity check. + if (IOC_TYPE(req) == 0) + return false; + return true; +} + +static const ioctl_desc *ioctl_lookup(unsigned req) { + const ioctl_desc *desc = ioctl_table_lookup(req); + if (desc) + return desc; + + // Try stripping access size from the request id. + desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT)); + // Sanity check: requests that encode access size are either read or write and + // have size of 0 in the table. + if (desc && desc->size == 0 && + (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE || + desc->type == ioctl_desc::READ)) + return desc; + return nullptr; +} + +static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d, + unsigned request, void *arg) { + if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) { + unsigned size = desc->size ? 
desc->size : IOC_SIZE(request); + COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size); + } + if (desc->type != ioctl_desc::CUSTOM) + return; + if (request == IOCTL_SIOCGIFCONF) { + struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; + COMMON_INTERCEPTOR_READ_RANGE(ctx, (char *)&ifc->ifc_len, + sizeof(ifc->ifc_len)); + } +} + +static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d, + unsigned request, void *arg) { + if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) { + // FIXME: add verbose output + unsigned size = desc->size ? desc->size : IOC_SIZE(request); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size); + } + if (desc->type != ioctl_desc::CUSTOM) + return; + if (request == IOCTL_SIOCGIFCONF) { + struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len); + } +} + +#endif // SANITIZER_NETBSD diff --git a/lib/sanitizer_common/sanitizer_interface_internal.h b/lib/sanitizer_common/sanitizer_interface_internal.h index 942f0b1cb586..f53d9557eab4 100644 --- a/lib/sanitizer_common/sanitizer_interface_internal.h +++ b/lib/sanitizer_common/sanitizer_interface_internal.h @@ -52,6 +52,12 @@ extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage(); SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard); + + // Returns 1 on the first call, then returns 0 thereafter. Called by the tool + // to ensure only one report is printed when multiple errors occur + // simultaneously. 
+ SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state(); + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_annotate_contiguous_container(const void *beg, const void *end, diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h index dc480e75f98d..f8a405ba6e4d 100644 --- a/lib/sanitizer_common/sanitizer_internal_defs.h +++ b/lib/sanitizer_common/sanitizer_internal_defs.h @@ -19,6 +19,9 @@ # define SANITIZER_DEBUG 0 #endif +#define SANITIZER_STRINGIFY_(S) #S +#define SANITIZER_STRINGIFY(S) SANITIZER_STRINGIFY_(S) + // Only use SANITIZER_*ATTRIBUTE* before the function return type! #if SANITIZER_WINDOWS #if SANITIZER_IMPORT_INTERFACE @@ -36,7 +39,8 @@ #endif // TLS is handled differently on different platforms -#if SANITIZER_LINUX || SANITIZER_NETBSD +#if SANITIZER_LINUX || SANITIZER_NETBSD || \ + SANITIZER_FREEBSD || SANITIZER_OPENBSD # define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \ __attribute__((tls_model("initial-exec"))) thread_local #else @@ -65,7 +69,13 @@ // SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that // will evaluate to a null pointer when not defined. #ifndef SANITIZER_SUPPORTS_WEAK_HOOKS -#if (SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_SOLARIS) && !SANITIZER_GO +#if (SANITIZER_LINUX || SANITIZER_SOLARIS) && !SANITIZER_GO +# define SANITIZER_SUPPORTS_WEAK_HOOKS 1 +// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined +// weak symbols. Mac OS X 10.9/Darwin 13 is the first release only supported +// by Xcode >= 4.5. +#elif SANITIZER_MAC && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO # define SANITIZER_SUPPORTS_WEAK_HOOKS 1 #else # define SANITIZER_SUPPORTS_WEAK_HOOKS 0 @@ -88,9 +98,16 @@ // We can use .preinit_array section on Linux to call sanitizer initialization // functions very early in the process startup (unless PIC macro is defined). 
+// +// On FreeBSD, .preinit_array functions are called with rtld_bind_lock writer +// lock held. It will lead to dead lock if unresolved PLT functions (which helds +// rtld_bind_lock reader lock) are called inside .preinit_array functions. +// // FIXME: do we have anything like this on Mac? -#if SANITIZER_LINUX && !SANITIZER_ANDROID && !defined(PIC) -# define SANITIZER_CAN_USE_PREINIT_ARRAY 1 +#ifndef SANITIZER_CAN_USE_PREINIT_ARRAY +#if ((SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_OPENBSD) && \ + !defined(PIC) +#define SANITIZER_CAN_USE_PREINIT_ARRAY 1 // Before Solaris 11.4, .preinit_array is fully supported only with GNU ld. // FIXME: Check for those conditions. #elif SANITIZER_SOLARIS && !defined(PIC) @@ -98,12 +115,18 @@ #else # define SANITIZER_CAN_USE_PREINIT_ARRAY 0 #endif +#endif // SANITIZER_CAN_USE_PREINIT_ARRAY // GCC does not understand __has_feature #if !defined(__has_feature) # define __has_feature(x) 0 #endif +// Older GCCs do not understand __has_attribute. +#if !defined(__has_attribute) +# define __has_attribute(x) 0 +#endif + // For portability reasons we do not include stddef.h, stdint.h or any other // system header, but we do need some basic types that are not defined // in a portable way by the language itself. @@ -147,7 +170,8 @@ typedef long pid_t; typedef int pid_t; #endif -#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC || \ +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_MAC || \ (SANITIZER_LINUX && defined(__x86_64__)) typedef u64 OFF_T; #else @@ -158,7 +182,7 @@ typedef u64 OFF64_T; #if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC typedef uptr operator_new_size_type; #else -# if defined(__s390__) && !defined(__s390x__) +# if SANITIZER_OPENBSD || defined(__s390__) && !defined(__s390x__) // Special case: 31-bit s390 has unsigned long as size_t. 
typedef unsigned long operator_new_size_type; # else @@ -166,12 +190,7 @@ typedef u32 operator_new_size_type; # endif #endif -#if SANITIZER_MAC -// On Darwin, thread IDs are 64-bit even on 32-bit systems. typedef u64 tid_t; -#else -typedef uptr tid_t; -#endif // ----------- ATTENTION ------------- // This header should NOT include any other headers to avoid portability issues. @@ -197,6 +216,7 @@ typedef uptr tid_t; # define LIKELY(x) (x) # define UNLIKELY(x) (x) # define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ (void)0 +# define WARN_UNUSED_RESULT #else // _MSC_VER # define ALWAYS_INLINE inline __attribute__((always_inline)) # define ALIAS(x) __attribute__((alias(x))) @@ -215,6 +235,7 @@ typedef uptr tid_t; # else # define PREFETCH(x) __builtin_prefetch(x) # endif +# define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) #endif // _MSC_VER #if !defined(_MSC_VER) || defined(__clang__) @@ -343,6 +364,12 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond, #define INT64_MAX (__INT64_C(9223372036854775807)) #undef UINT64_MAX #define UINT64_MAX (__UINT64_C(18446744073709551615)) +#undef UINTPTR_MAX +#if SANITIZER_WORDSIZE == 64 +# define UINTPTR_MAX (18446744073709551615UL) +#else +# define UINTPTR_MAX (4294967295U) +#endif // SANITIZER_WORDSIZE == 64 enum LinkerInitialized { LINKER_INITIALIZED = 0 }; diff --git a/lib/sanitizer_common/sanitizer_libc.cc b/lib/sanitizer_common/sanitizer_libc.cc index d6c8ea2a11a0..4b462bfe9728 100644 --- a/lib/sanitizer_common/sanitizer_libc.cc +++ b/lib/sanitizer_common/sanitizer_libc.cc @@ -72,17 +72,6 @@ void *internal_memmove(void *dest, const void *src, uptr n) { return dest; } -// Semi-fast bzero for 16-aligned data. Still far from peak performance. 
-void internal_bzero_aligned16(void *s, uptr n) { - struct ALIGNED(16) S16 { u64 a, b; }; - CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0); - for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) { - p->a = p->b = 0; - // Make sure this does not become memset. - SanitizerBreakOptimization(nullptr); - } -} - void *internal_memset(void* s, int c, uptr n) { // The next line prevents Clang from making a call to memset() instead of the // loop below. @@ -112,14 +101,6 @@ char* internal_strdup(const char *s) { return s2; } -char* internal_strndup(const char *s, uptr n) { - uptr len = internal_strnlen(s, n); - char *s2 = (char*)InternalAlloc(len + 1); - internal_memcpy(s2, s, len); - s2[len] = 0; - return s2; -} - int internal_strcmp(const char *s1, const char *s2) { while (true) { unsigned c1 = *s1; @@ -234,13 +215,7 @@ char *internal_strstr(const char *haystack, const char *needle) { return nullptr; } -uptr internal_wcslen(const wchar_t *s) { - uptr i = 0; - while (s[i]) i++; - return i; -} - -s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) { +s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) { CHECK_EQ(base, 10); while (IsSpace(*nptr)) nptr++; int sgn = 1; diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h index 9c11fb0ad2be..7ed4e28a5695 100644 --- a/lib/sanitizer_common/sanitizer_libc.h +++ b/lib/sanitizer_common/sanitizer_libc.h @@ -32,8 +32,6 @@ void *internal_memrchr(const void *s, int c, uptr n); int internal_memcmp(const void* s1, const void* s2, uptr n); void *internal_memcpy(void *dest, const void *src, uptr n); void *internal_memmove(void *dest, const void *src, uptr n); -// Set [s, s + n) to 0. Both s and n should be 16-aligned. -void internal_bzero_aligned16(void *s, uptr n); // Should not be used in performance-critical places. 
void *internal_memset(void *s, int c, uptr n); char* internal_strchr(const char *s, int c); @@ -41,7 +39,6 @@ char *internal_strchrnul(const char *s, int c); int internal_strcmp(const char *s1, const char *s2); uptr internal_strcspn(const char *s, const char *reject); char *internal_strdup(const char *s); -char *internal_strndup(const char *s, uptr n); uptr internal_strlen(const char *s); uptr internal_strlcat(char *dst, const char *src, uptr maxlen); char *internal_strncat(char *dst, const char *src, uptr n); @@ -50,11 +47,9 @@ uptr internal_strlcpy(char *dst, const char *src, uptr maxlen); char *internal_strncpy(char *dst, const char *src, uptr n); uptr internal_strnlen(const char *s, uptr maxlen); char *internal_strrchr(const char *s, int c); -// This is O(N^2), but we are not using it in hot places. -uptr internal_wcslen(const wchar_t *s); char *internal_strstr(const char *haystack, const char *needle); // Works only for base=10 and doesn't set errno. -s64 internal_simple_strtoll(const char *nptr, char **endptr, int base); +s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base); int internal_snprintf(char *buffer, uptr length, const char *format, ...); // Return true if all bytes in [mem, mem+size) are zero. 
diff --git a/lib/sanitizer_common/sanitizer_libignore.cc b/lib/sanitizer_common/sanitizer_libignore.cc index 0df055edae97..49c4ba429492 100644 --- a/lib/sanitizer_common/sanitizer_libignore.cc +++ b/lib/sanitizer_common/sanitizer_libignore.cc @@ -9,7 +9,8 @@ #include "sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_NETBSD +#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \ + SANITIZER_NETBSD || SANITIZER_OPENBSD #include "sanitizer_libignore.h" #include "sanitizer_flags.h" @@ -80,7 +81,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) { lib->name = internal_strdup(mod.full_name()); const uptr idx = atomic_load(&ignored_ranges_count_, memory_order_relaxed); - CHECK_LT(idx, kMaxLibs); + CHECK_LT(idx, ARRAY_SIZE(ignored_code_ranges_)); ignored_code_ranges_[idx].begin = range.beg; ignored_code_ranges_[idx].end = range.end; atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release); @@ -109,7 +110,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) { range.beg, range.end, mod.full_name()); const uptr idx = atomic_load(&instrumented_ranges_count_, memory_order_relaxed); - CHECK_LT(idx, kMaxLibs); + CHECK_LT(idx, ARRAY_SIZE(instrumented_code_ranges_)); instrumented_code_ranges_[idx].begin = range.beg; instrumented_code_ranges_[idx].end = range.end; atomic_store(&instrumented_ranges_count_, idx + 1, diff --git a/lib/sanitizer_common/sanitizer_libignore.h b/lib/sanitizer_common/sanitizer_libignore.h index 17b0f563d47d..49967b1e8d2a 100644 --- a/lib/sanitizer_common/sanitizer_libignore.h +++ b/lib/sanitizer_common/sanitizer_libignore.h @@ -66,14 +66,16 @@ class LibIgnore { return (pc >= range.begin && pc < range.end); } - static const uptr kMaxLibs = 128; + static const uptr kMaxIgnoredRanges = 128; + static const uptr kMaxInstrumentedRanges = 1024; + static const uptr kMaxLibs = 1024; // Hot part: atomic_uintptr_t ignored_ranges_count_; - LibCodeRange ignored_code_ranges_[kMaxLibs]; + LibCodeRange 
ignored_code_ranges_[kMaxIgnoredRanges]; atomic_uintptr_t instrumented_ranges_count_; - LibCodeRange instrumented_code_ranges_[kMaxLibs]; + LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges]; // Cold part: BlockingMutex mutex_; diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc index 6c83e8db42a5..96d6c1eff42c 100644 --- a/lib/sanitizer_common/sanitizer_linux.cc +++ b/lib/sanitizer_common/sanitizer_linux.cc @@ -15,7 +15,7 @@ #include "sanitizer_platform.h" #if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS + SANITIZER_OPENBSD || SANITIZER_SOLARIS #include "sanitizer_common.h" #include "sanitizer_flags.h" @@ -26,8 +26,6 @@ #include "sanitizer_mutex.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" -#include "sanitizer_stacktrace.h" -#include "sanitizer_symbolizer.h" #if SANITIZER_LINUX #include <asm/param.h> @@ -55,6 +53,7 @@ #include <link.h> #include <pthread.h> #include <sched.h> +#include <signal.h> #include <sys/mman.h> #if !SANITIZER_SOLARIS #include <sys/ptrace.h> @@ -64,7 +63,12 @@ #include <sys/syscall.h> #include <sys/time.h> #include <sys/types.h> +#if !SANITIZER_OPENBSD #include <ucontext.h> +#endif +#if SANITIZER_OPENBSD +#include <sys/futex.h> +#endif #include <unistd.h> #if SANITIZER_LINUX @@ -84,13 +88,12 @@ extern "C" { // FreeBSD 9.2 and 10.0. 
#include <sys/umtx.h> } -extern char **environ; // provided by crt1 +#include <sys/thr.h> #endif // SANITIZER_FREEBSD #if SANITIZER_NETBSD #include <limits.h> // For NAME_MAX #include <sys/sysctl.h> -extern char **environ; // provided by crt1 #include <sys/exec.h> extern struct ps_strings *__ps_strings; #endif // SANITIZER_NETBSD @@ -98,14 +101,10 @@ extern struct ps_strings *__ps_strings; #if SANITIZER_SOLARIS #include <stdlib.h> #include <thread.h> - -extern char **_environ; #define environ _environ #endif -#if !SANITIZER_ANDROID -#include <sys/signal.h> -#endif +extern char **environ; #if SANITIZER_LINUX // <linux/time.h> @@ -122,8 +121,8 @@ const int FUTEX_WAKE = 1; // Are we using 32-bit or 64-bit Linux syscalls? // x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32 // but it still needs to use 64-bit syscalls. -#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \ - SANITIZER_WORDSIZE == 64) +#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \ + SANITIZER_WORDSIZE == 64) # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1 #else # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0 @@ -135,6 +134,9 @@ extern void internal_sigreturn(); } #endif +// Note : FreeBSD had implemented both +// Linux and OpenBSD apis, available from +// future 12.x version most likely #if SANITIZER_LINUX && defined(__NR_getrandom) # if !defined(GRND_NONBLOCK) # define GRND_NONBLOCK 1 @@ -144,6 +146,12 @@ extern void internal_sigreturn(); # define SANITIZER_USE_GETRANDOM 0 #endif // SANITIZER_LINUX && defined(__NR_getrandom) +#if SANITIZER_OPENBSD +# define SANITIZER_USE_GETENTROPY 1 +#else +# define SANITIZER_USE_GETENTROPY 0 +#endif // SANITIZER_USE_GETENTROPY + namespace __sanitizer { #if SANITIZER_LINUX && defined(__x86_64__) @@ -158,11 +166,11 @@ namespace __sanitizer { // --------------- sanitizer_libc.h #if !SANITIZER_SOLARIS -#if !SANITIZER_S390 +#if !SANITIZER_S390 && !SANITIZER_OPENBSD uptr internal_mmap(void *addr, uptr length, int 
prot, int flags, int fd, OFF_T offset) { #if SANITIZER_NETBSD - return internal_syscall_ptr(SYSCALL(mmap), addr, length, prot, flags, fd, + return internal_syscall64(SYSCALL(mmap), addr, length, prot, flags, fd, (long)0, offset); #elif SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd, @@ -174,8 +182,9 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, offset / 4096); #endif } -#endif // !SANITIZER_S390 +#endif // !SANITIZER_S390 && !SANITIZER_OPENBSD +#if !SANITIZER_OPENBSD uptr internal_munmap(void *addr, uptr length) { return internal_syscall_ptr(SYSCALL(munmap), (uptr)addr, length); } @@ -183,6 +192,7 @@ uptr internal_munmap(void *addr, uptr length) { int internal_mprotect(void *addr, uptr length, int prot) { return internal_syscall_ptr(SYSCALL(mprotect), (uptr)addr, length, prot); } +#endif uptr internal_close(fd_t fd) { return internal_syscall(SYSCALL(close), fd); @@ -298,12 +308,12 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) { #endif uptr internal_stat(const char *path, void *buf) { -#if SANITIZER_FREEBSD || SANITIZER_NETBSD - return internal_syscall_ptr(SYSCALL(fstatat), AT_FDCWD, (uptr)path, - (uptr)buf, 0); +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD + return internal_syscall_ptr(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, + 0); #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS - return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, - (uptr)buf, 0); + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + 0); #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS # if defined(__mips64) // For mips64, stat syscall fills buffer in the format of kernel_stat @@ -325,12 +335,12 @@ uptr internal_stat(const char *path, void *buf) { uptr internal_lstat(const char *path, void *buf) { #if SANITIZER_NETBSD return internal_syscall_ptr(SYSCALL(lstat), path, buf); -#elif 
SANITIZER_FREEBSD - return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, - (uptr)buf, AT_SYMLINK_NOFOLLOW); +#elif SANITIZER_FREEBSD || SANITIZER_OPENBSD + return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, + AT_SYMLINK_NOFOLLOW); #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS - return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, - (uptr)buf, AT_SYMLINK_NOFOLLOW); + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + AT_SYMLINK_NOFOLLOW); #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS # if SANITIZER_MIPS64 // For mips64, lstat syscall fills buffer in the format of kernel_stat @@ -350,8 +360,9 @@ uptr internal_lstat(const char *path, void *buf) { } uptr internal_fstat(fd_t fd, void *buf) { -#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS -# if SANITIZER_MIPS64 && !SANITIZER_NETBSD +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || \ + SANITIZER_LINUX_USES_64BIT_SYSCALLS +#if SANITIZER_MIPS64 && !SANITIZER_NETBSD && !SANITIZER_OPENBSD // For mips64, fstat syscall fills buffer in the format of kernel_stat struct kernel_stat kbuf; int res = internal_syscall(SYSCALL(fstat), fd, &kbuf); @@ -385,15 +396,18 @@ uptr internal_dup2(int oldfd, int newfd) { uptr internal_readlink(const char *path, char *buf, uptr bufsize) { #if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS - return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, - (uptr)path, (uptr)buf, bufsize); + return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf, + bufsize); +#elif SANITIZER_OPENBSD + return internal_syscall_ptr(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, + (uptr)buf, bufsize); #else return internal_syscall_ptr(SYSCALL(readlink), path, buf, bufsize); #endif } uptr internal_unlink(const char *path) { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, 
(uptr)path, 0); #else return internal_syscall_ptr(SYSCALL(unlink), (uptr)path); @@ -401,7 +415,7 @@ uptr internal_unlink(const char *path) { } uptr internal_rename(const char *oldpath, const char *newpath) { -#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS +#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD, (uptr)newpath); #else @@ -414,7 +428,7 @@ uptr internal_sched_yield() { } void internal__exit(int exitcode) { -#if SANITIZER_FREEBSD || SANITIZER_NETBSD +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD internal_syscall(SYSCALL(exit), exitcode); #else internal_syscall(SYSCALL(exit_group), exitcode); @@ -434,7 +448,7 @@ unsigned int internal_sleep(unsigned int seconds) { uptr internal_execve(const char *filename, char *const argv[], char *const envp[]) { return internal_syscall_ptr(SYSCALL(execve), (uptr)filename, (uptr)argv, - (uptr)envp); + (uptr)envp); } #endif // !SANITIZER_SOLARIS @@ -453,11 +467,15 @@ bool FileExists(const char *filename) { tid_t GetTid() { #if SANITIZER_FREEBSD - return (uptr)pthread_self(); + long Tid; + thr_self(&Tid); + return Tid; +#elif SANITIZER_OPENBSD + return internal_syscall(SYSCALL(getthrid)); #elif SANITIZER_NETBSD return _lwp_self(); #elif SANITIZER_SOLARIS - return (uptr)thr_self(); + return thr_self(); #else return internal_syscall(SYSCALL(gettid)); #endif @@ -465,7 +483,7 @@ tid_t GetTid() { #if !SANITIZER_SOLARIS u64 NanoTime() { -#if SANITIZER_FREEBSD || SANITIZER_NETBSD +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD timeval tv; #else kernel_timeval tv; @@ -484,7 +502,8 @@ uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) { // 'environ' array (on some others) and does not use libc. This function // should be called first inside __asan_init. 
const char *GetEnv(const char *name) { -#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || \ + SANITIZER_SOLARIS if (::environ != 0) { uptr NameLen = internal_strlen(name); for (char **Env = ::environ; *Env != 0; Env++) { @@ -522,13 +541,14 @@ const char *GetEnv(const char *name) { #endif } -#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD +#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_OPENBSD extern "C" { - SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end; +SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end; } #endif -#if !SANITIZER_GO && !SANITIZER_FREEBSD && !SANITIZER_NETBSD +#if !SANITIZER_GO && !SANITIZER_FREEBSD && !SANITIZER_NETBSD && \ + !SANITIZER_OPENBSD static void ReadNullSepFileToArray(const char *path, char ***arr, int arr_size) { char *buff; @@ -553,6 +573,7 @@ static void ReadNullSepFileToArray(const char *path, char ***arr, } #endif +#if !SANITIZER_OPENBSD static void GetArgsAndEnv(char ***argv, char ***envp) { #if SANITIZER_FREEBSD // On FreeBSD, retrieving the argument and environment arrays is done via the @@ -568,11 +589,11 @@ static void GetArgsAndEnv(char ***argv, char ***envp) { *envp = pss->ps_envstr; #elif SANITIZER_NETBSD *argv = __ps_strings->ps_argvstr; - *argv = __ps_strings->ps_envstr; -#else + *envp = __ps_strings->ps_envstr; +#else // SANITIZER_FREEBSD #if !SANITIZER_GO if (&__libc_stack_end) { -#endif +#endif // !SANITIZER_GO uptr* stack_end = (uptr*)__libc_stack_end; int argc = *stack_end; *argv = (char**)(stack_end + 1); @@ -583,8 +604,8 @@ static void GetArgsAndEnv(char ***argv, char ***envp) { ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv); ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp); } -#endif -#endif +#endif // !SANITIZER_GO +#endif // SANITIZER_FREEBSD } char **GetArgv() { @@ -619,6 +640,7 @@ void ReExec() { Printf("execve failed, errno %d\n", rverrno); Die(); } +#endif #if !SANITIZER_SOLARIS enum 
MutexState { @@ -672,7 +694,7 @@ void BlockingMutex::CheckLocked() { // The actual size of this structure is specified by d_reclen. // Note that getdents64 uses a different structure format. We only provide the // 32-bit syscall here. -#if SANITIZER_NETBSD +#if SANITIZER_NETBSD || SANITIZER_OPENBSD // struct dirent is different for Linux and us. At this moment, we use only // d_fileno (Linux call this d_ino), d_reclen, and d_name. struct linux_dirent { @@ -825,19 +847,18 @@ int internal_sigaction_syscall(int signum, const void *act, void *oldact) { __sanitizer_sigaction u_adjust; internal_memcpy(&u_adjust, act, sizeof(u_adjust)); #if !SANITIZER_ANDROID || !SANITIZER_MIPS32 - if (u_adjust.sa_restorer == nullptr) { - u_adjust.sa_restorer = internal_sigreturn; - } + if (u_adjust.sa_restorer == nullptr) { + u_adjust.sa_restorer = internal_sigreturn; + } #endif - return internal_sigaction_norestorer(signum, (const void *)&u_adjust, - oldact); + return internal_sigaction_norestorer(signum, (const void *)&u_adjust, oldact); } -#endif // defined(__x86_64__) && !SANITIZER_GO +#endif // defined(__x86_64__) && !SANITIZER_GO #endif // SANITIZER_LINUX uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, - __sanitizer_sigset_t *oldset) { -#if SANITIZER_FREEBSD || SANITIZER_NETBSD + __sanitizer_sigset_t *oldset) { +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD return internal_syscall_ptr(SYSCALL(sigprocmask), how, set, oldset); #else __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set; @@ -880,71 +901,90 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) { #endif // !SANITIZER_SOLARIS // ThreadLister implementation. 
-ThreadLister::ThreadLister(int pid) - : pid_(pid), - descriptor_(-1), - buffer_(4096), - error_(true), - entry_((struct linux_dirent *)buffer_.data()), - bytes_read_(0) { +ThreadLister::ThreadLister(pid_t pid) : pid_(pid), buffer_(4096) { char task_directory_path[80]; internal_snprintf(task_directory_path, sizeof(task_directory_path), "/proc/%d/task/", pid); - uptr openrv = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY); - if (internal_iserror(openrv)) { - error_ = true; + descriptor_ = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY); + if (internal_iserror(descriptor_)) { Report("Can't open /proc/%d/task for reading.\n", pid); - } else { - error_ = false; - descriptor_ = openrv; } } -int ThreadLister::GetNextTID() { - int tid = -1; - do { - if (error_) - return -1; - if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries()) - return -1; - if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' && - entry_->d_name[0] <= '9') { - // Found a valid tid. - tid = (int)internal_atoll(entry_->d_name); +ThreadLister::Result ThreadLister::ListThreads( + InternalMmapVector<tid_t> *threads) { + if (internal_iserror(descriptor_)) + return Error; + internal_lseek(descriptor_, 0, SEEK_SET); + threads->clear(); + + Result result = Ok; + for (bool first_read = true;; first_read = false) { + // Resize to max capacity if it was downsized by IsAlive. 
+ buffer_.resize(buffer_.capacity()); + CHECK_GE(buffer_.size(), 4096); + uptr read = internal_getdents( + descriptor_, (struct linux_dirent *)buffer_.data(), buffer_.size()); + if (!read) + return result; + if (internal_iserror(read)) { + Report("Can't read directory entries from /proc/%d/task.\n", pid_); + return Error; } - entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen); - } while (tid < 0); - return tid; -} -void ThreadLister::Reset() { - if (error_ || descriptor_ < 0) - return; - internal_lseek(descriptor_, 0, SEEK_SET); -} + for (uptr begin = (uptr)buffer_.data(), end = begin + read; begin < end;) { + struct linux_dirent *entry = (struct linux_dirent *)begin; + begin += entry->d_reclen; + if (entry->d_ino == 1) { + // Inode 1 is for bad blocks and also can be a reason for early return. + // Should be emitted if kernel tried to output terminating thread. + // See proc_task_readdir implementation in Linux. + result = Incomplete; + } + if (entry->d_ino && *entry->d_name >= '0' && *entry->d_name <= '9') + threads->push_back(internal_atoll(entry->d_name)); + } -ThreadLister::~ThreadLister() { - if (descriptor_ >= 0) - internal_close(descriptor_); + // Now we are going to detect short-read or early EOF. In such cases Linux + // can return inconsistent list with missing alive threads. + // Code will just remember that the list can be incomplete but it will + // continue reads to return as much as possible. + if (!first_read) { + // The first one was a short-read by definition. + result = Incomplete; + } else if (read > buffer_.size() - 1024) { + // Read was close to the buffer size. So double the size and assume the + // worst. + buffer_.resize(buffer_.size() * 2); + result = Incomplete; + } else if (!threads->empty() && !IsAlive(threads->back())) { + // Maybe Linux early returned from read on terminated thread (!pid_alive) + // and failed to restore read position. + // See next_tid and proc_task_instantiate in Linux. 
+ result = Incomplete; + } + } } -bool ThreadLister::error() { return error_; } - -bool ThreadLister::GetDirectoryEntries() { - CHECK_GE(descriptor_, 0); - CHECK_NE(error_, true); - bytes_read_ = internal_getdents(descriptor_, - (struct linux_dirent *)buffer_.data(), - buffer_.size()); - if (internal_iserror(bytes_read_)) { - Report("Can't read directory entries from /proc/%d/task.\n", pid_); - error_ = true; +bool ThreadLister::IsAlive(int tid) { + // /proc/%d/task/%d/status uses same call to detect alive threads as + // proc_task_readdir. See task_state implementation in Linux. + char path[80]; + internal_snprintf(path, sizeof(path), "/proc/%d/task/%d/status", pid_, tid); + if (!ReadFileToVector(path, &buffer_) || buffer_.empty()) return false; - } else if (bytes_read_ == 0) { + buffer_.push_back(0); + static const char kPrefix[] = "\nPPid:"; + const char *field = internal_strstr(buffer_.data(), kPrefix); + if (!field) return false; - } - entry_ = (struct linux_dirent *)buffer_.data(); - return true; + field += internal_strlen(kPrefix); + return (int)internal_atoll(field) != 0; +} + +ThreadLister::~ThreadLister() { + if (!internal_iserror(descriptor_)) + internal_close(descriptor_); } #if SANITIZER_WORDSIZE == 32 @@ -983,7 +1023,7 @@ static uptr GetKernelAreaSize() { #endif // SANITIZER_WORDSIZE == 32 uptr GetMaxVirtualAddress() { -#if SANITIZER_NETBSD && defined(__x86_64__) +#if (SANITIZER_NETBSD || SANITIZER_OPENBSD) && defined(__x86_64__) return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE) #elif SANITIZER_WORDSIZE == 64 # if defined(__powerpc64__) || defined(__aarch64__) @@ -1034,6 +1074,7 @@ uptr GetPageSize() { #endif } +#if !SANITIZER_OPENBSD uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { #if SANITIZER_SOLARIS const char *default_module_name = getexecname(); @@ -1069,6 +1110,7 @@ uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { return module_name_len; #endif } +#endif // !SANITIZER_OPENBSD uptr ReadLongProcessName(/*out*/ char *buf, 
uptr buf_len) { #if SANITIZER_LINUX @@ -1101,10 +1143,10 @@ bool LibraryNameIs(const char *full_name, const char *base_name) { // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) { CHECK_NE(map, nullptr); -#if !SANITIZER_FREEBSD +#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD typedef ElfW(Phdr) Elf_Phdr; typedef ElfW(Ehdr) Elf_Ehdr; -#endif // !SANITIZER_FREEBSD +#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD char *base = (char *)map->l_addr; Elf_Ehdr *ehdr = (Elf_Ehdr *)base; char *phdrs = base + ehdr->e_phoff; @@ -1618,6 +1660,8 @@ static HandleSignalMode GetHandleSignalModeImpl(int signum) { return common_flags()->handle_abort; case SIGILL: return common_flags()->handle_sigill; + case SIGTRAP: + return common_flags()->handle_sigtrap; case SIGFPE: return common_flags()->handle_sigfpe; case SIGSEGV: @@ -1684,21 +1728,78 @@ static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) { } #endif +#if SANITIZER_OPENBSD +using Context = sigcontext; +#else +using Context = ucontext_t; +#endif + SignalContext::WriteFlag SignalContext::GetWriteFlag() const { - ucontext_t *ucontext = (ucontext_t *)context; + Context *ucontext = (Context *)context; #if defined(__x86_64__) || defined(__i386__) static const uptr PF_WRITE = 1U << 1; #if SANITIZER_FREEBSD uptr err = ucontext->uc_mcontext.mc_err; #elif SANITIZER_NETBSD uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR]; +#elif SANITIZER_OPENBSD + uptr err = ucontext->sc_err; #elif SANITIZER_SOLARIS && defined(__i386__) -# define ERR 13 - uptr err = ucontext->uc_mcontext.gregs[ERR]; + const int Err = 13; + uptr err = ucontext->uc_mcontext.gregs[Err]; #else uptr err = ucontext->uc_mcontext.gregs[REG_ERR]; -#endif +#endif // SANITIZER_FREEBSD return err & PF_WRITE ? 
WRITE : READ; +#elif defined(__mips__) + uint32_t *exception_source; + uint32_t faulty_instruction; + uint32_t op_code; + + exception_source = (uint32_t *)ucontext->uc_mcontext.pc; + faulty_instruction = (uint32_t)(*exception_source); + + op_code = (faulty_instruction >> 26) & 0x3f; + + // FIXME: Add support for FPU, microMIPS, DSP, MSA memory instructions. + switch (op_code) { + case 0x28: // sb + case 0x29: // sh + case 0x2b: // sw + case 0x3f: // sd +#if __mips_isa_rev < 6 + case 0x2c: // sdl + case 0x2d: // sdr + case 0x2a: // swl + case 0x2e: // swr +#endif + return SignalContext::WRITE; + + case 0x20: // lb + case 0x24: // lbu + case 0x21: // lh + case 0x25: // lhu + case 0x23: // lw + case 0x27: // lwu + case 0x37: // ld +#if __mips_isa_rev < 6 + case 0x1a: // ldl + case 0x1b: // ldr + case 0x22: // lwl + case 0x26: // lwr +#endif + return SignalContext::READ; +#if __mips_isa_rev == 6 + case 0x3b: // pcrel + op_code = (faulty_instruction >> 19) & 0x3; + switch (op_code) { + case 0x1: // lwpc + case 0x2: // lwupc + return SignalContext::READ; + } +#endif + } + return SignalContext::UNKNOWN; #elif defined(__arm__) static const uptr FSR_WRITE = 1U << 11; uptr fsr = ucontext->uc_mcontext.error_code; @@ -1725,7 +1826,13 @@ void SignalContext::DumpAllRegisters(void *context) { } static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { -#if defined(__arm__) +#if SANITIZER_NETBSD + // This covers all NetBSD architectures + ucontext_t *ucontext = (ucontext_t *)context; + *pc = _UC_MACHINE_PC(ucontext); + *bp = _UC_MACHINE_FP(ucontext); + *sp = _UC_MACHINE_SP(ucontext); +#elif defined(__arm__) ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.arm_pc; *bp = ucontext->uc_mcontext.arm_fp; @@ -1747,11 +1854,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { *pc = ucontext->uc_mcontext.mc_rip; *bp = ucontext->uc_mcontext.mc_rbp; *sp = ucontext->uc_mcontext.mc_rsp; -#elif SANITIZER_NETBSD - ucontext_t *ucontext = 
(ucontext_t *)context; - *pc = ucontext->uc_mcontext.__gregs[_REG_RIP]; - *bp = ucontext->uc_mcontext.__gregs[_REG_RBP]; - *sp = ucontext->uc_mcontext.__gregs[_REG_RSP]; +#elif SANITIZER_OPENBSD + sigcontext *ucontext = (sigcontext *)context; + *pc = ucontext->sc_rip; + *bp = ucontext->sc_rbp; + *sp = ucontext->sc_rsp; # else ucontext_t *ucontext = (ucontext_t*)context; *pc = ucontext->uc_mcontext.gregs[REG_RIP]; @@ -1764,11 +1871,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { *pc = ucontext->uc_mcontext.mc_eip; *bp = ucontext->uc_mcontext.mc_ebp; *sp = ucontext->uc_mcontext.mc_esp; -#elif SANITIZER_NETBSD - ucontext_t *ucontext = (ucontext_t *)context; - *pc = ucontext->uc_mcontext.__gregs[_REG_EIP]; - *bp = ucontext->uc_mcontext.__gregs[_REG_EBP]; - *sp = ucontext->uc_mcontext.__gregs[_REG_ESP]; +#elif SANITIZER_OPENBSD + sigcontext *ucontext = (sigcontext *)context; + *pc = ucontext->sc_eip; + *bp = ucontext->sc_ebp; + *sp = ucontext->sc_esp; # else ucontext_t *ucontext = (ucontext_t*)context; # if SANITIZER_SOLARIS @@ -1843,6 +1950,30 @@ void MaybeReexec() { // No need to re-exec on Linux. 
} +void CheckASLR() { +#if SANITIZER_NETBSD + int mib[3]; + int paxflags; + size_t len = sizeof(paxflags); + + mib[0] = CTL_PROC; + mib[1] = internal_getpid(); + mib[2] = PROC_PID_PAXFLAGS; + + if (UNLIKELY(sysctl(mib, 3, &paxflags, &len, NULL, 0) == -1)) { + Printf("sysctl failed\n"); + Die(); + } + + if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_ASLR)) { + Printf("This sanitizer is not compatible with enabled ASLR\n"); + Die(); + } +#else + // Do nothing +#endif +} + void PrintModuleMap() { } void CheckNoDeepBind(const char *filename, int flag) { @@ -1861,7 +1992,8 @@ void CheckNoDeepBind(const char *filename, int flag) { } uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, - uptr *largest_gap_found) { + uptr *largest_gap_found, + uptr *max_occupied_addr) { UNREACHABLE("FindAvailableMemoryRange is not available"); return 0; } @@ -1869,6 +2001,15 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, bool GetRandom(void *buffer, uptr length, bool blocking) { if (!buffer || !length || length > 256) return false; +#if SANITIZER_USE_GETENTROPY + uptr rnd = getentropy(buffer, length); + int rverrno = 0; + if (internal_iserror(rnd, &rverrno) && rverrno == EFAULT) + return false; + else if (rnd == 0) + return true; +#endif // SANITIZER_USE_GETENTROPY + #if SANITIZER_USE_GETRANDOM static atomic_uint8_t skip_getrandom_syscall; if (!atomic_load_relaxed(&skip_getrandom_syscall)) { @@ -1881,7 +2022,7 @@ bool GetRandom(void *buffer, uptr length, bool blocking) { else if (res == length) return true; } -#endif // SANITIZER_USE_GETRANDOM +#endif // SANITIZER_USE_GETRANDOM // Up to 256 bytes, a read off /dev/urandom will not be interrupted. // blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom. 
uptr fd = internal_open("/dev/urandom", O_RDONLY); @@ -1896,6 +2037,4 @@ bool GetRandom(void *buffer, uptr length, bool blocking) { } // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || - // SANITIZER_SOLARIS - +#endif diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h index 2d227f82a99e..975d6541d88a 100644 --- a/lib/sanitizer_common/sanitizer_linux.h +++ b/lib/sanitizer_common/sanitizer_linux.h @@ -14,11 +14,12 @@ #define SANITIZER_LINUX_H #include "sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_SOLARIS #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_platform_limits_netbsd.h" +#include "sanitizer_platform_limits_openbsd.h" #include "sanitizer_platform_limits_posix.h" #include "sanitizer_platform_limits_solaris.h" #include "sanitizer_posix.h" @@ -73,23 +74,21 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, // This class reads thread IDs from /proc/<pid>/task using only syscalls. class ThreadLister { public: - explicit ThreadLister(int pid); + explicit ThreadLister(pid_t pid); ~ThreadLister(); - // GetNextTID returns -1 if the list of threads is exhausted, or if there has - // been an error. - int GetNextTID(); - void Reset(); - bool error(); + enum Result { + Error, + Incomplete, + Ok, + }; + Result ListThreads(InternalMmapVector<tid_t> *threads); private: - bool GetDirectoryEntries(); - - int pid_; - int descriptor_; - InternalScopedBuffer<char> buffer_; - bool error_; - struct linux_dirent* entry_; - int bytes_read_; + bool IsAlive(int tid); + + pid_t pid_; + int descriptor_ = -1; + InternalMmapVector<char> buffer_; }; // Exposed for testing. 
@@ -145,6 +144,5 @@ ALWAYS_INLINE uptr *get_android_tls_ptr() { } // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || - // SANITIZER_SOLARIS +#endif #endif // SANITIZER_LINUX_H diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc index 56fdfc8705f3..1c7fb7db8844 100644 --- a/lib/sanitizer_common/sanitizer_linux_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc @@ -14,8 +14,8 @@ #include "sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_SOLARIS #include "sanitizer_allocator_internal.h" #include "sanitizer_atomic.h" @@ -26,7 +26,6 @@ #include "sanitizer_linux.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" -#include "sanitizer_stacktrace.h" #include <dlfcn.h> // for dlsym() #include <link.h> @@ -42,6 +41,11 @@ #define pthread_getattr_np pthread_attr_get_np #endif +#if SANITIZER_OPENBSD +#include <pthread_np.h> +#include <sys/sysctl.h> +#endif + #if SANITIZER_NETBSD #include <sys/sysctl.h> #include <sys/tls.h> @@ -51,10 +55,6 @@ #include <thread.h> #endif -#if SANITIZER_LINUX -#include <sys/prctl.h> -#endif - #if SANITIZER_ANDROID #include <android/api-level.h> #if !defined(CPU_COUNT) && !defined(__aarch64__) @@ -127,7 +127,12 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, CHECK_EQ(thr_stksegment(&ss), 0); stacksize = ss.ss_size; stackaddr = (char *)ss.ss_sp - stacksize; -#else // !SANITIZER_SOLARIS +#elif SANITIZER_OPENBSD + stack_t sattr; + CHECK_EQ(pthread_stackseg_np(pthread_self(), &sattr), 0); + stackaddr = sattr.ss_sp; + stacksize = sattr.ss_size; +#else // !SANITIZER_SOLARIS pthread_attr_t attr; pthread_attr_init(&attr); CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); @@ -152,50 +157,98 @@ bool SetEnv(const char *name, const 
char *value) { } #endif -bool SanitizerSetThreadName(const char *name) { -#ifdef PR_SET_NAME - return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0); // NOLINT -#else - return false; -#endif -} - -bool SanitizerGetThreadName(char *name, int max_len) { -#ifdef PR_GET_NAME - char buff[17]; - if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0)) // NOLINT +__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor, + int *patch) { +#ifdef _CS_GNU_LIBC_VERSION + char buf[64]; + uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf)); + if (len >= sizeof(buf)) return false; - internal_strncpy(name, buff, max_len); - name[max_len] = 0; + buf[len] = 0; + static const char kGLibC[] = "glibc "; + if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0) + return false; + const char *p = buf + sizeof(kGLibC) - 1; + *major = internal_simple_strtoll(p, &p, 10); + *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0; + *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0; return true; #else return false; #endif } -#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \ - !SANITIZER_NETBSD && !SANITIZER_SOLARIS +#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \ + !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS static uptr g_tls_size; #ifdef __i386__ +# ifndef __GLIBC_PREREQ +# define CHECK_GET_TLS_STATIC_INFO_VERSION 1 +# else +# define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27)) +# endif +#else +# define CHECK_GET_TLS_STATIC_INFO_VERSION 0 +#endif + +#if CHECK_GET_TLS_STATIC_INFO_VERSION # define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall)) #else # define DL_INTERNAL_FUNCTION #endif +namespace { +struct GetTlsStaticInfoCall { + typedef void (*get_tls_func)(size_t*, size_t*); +}; +struct GetTlsStaticInfoRegparmCall { + typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION; +}; + +template <typename T> +void CallGetTls(void* ptr, size_t* size, 
size_t* align) { + typename T::get_tls_func get_tls; + CHECK_EQ(sizeof(get_tls), sizeof(ptr)); + internal_memcpy(&get_tls, &ptr, sizeof(ptr)); + CHECK_NE(get_tls, 0); + get_tls(size, align); +} + +bool CmpLibcVersion(int major, int minor, int patch) { + int ma; + int mi; + int pa; + if (!GetLibcVersion(&ma, &mi, &pa)) + return false; + if (ma > major) + return true; + if (ma < major) + return false; + if (mi > minor) + return true; + if (mi < minor) + return false; + return pa >= patch; +} + +} // namespace + void InitTlsSize() { -// all current supported platforms have 16 bytes stack alignment + // all current supported platforms have 16 bytes stack alignment const size_t kStackAlign = 16; - typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION; - get_tls_func get_tls; void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); - CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr)); - internal_memcpy(&get_tls, &get_tls_static_info_ptr, - sizeof(get_tls_static_info_ptr)); - CHECK_NE(get_tls, 0); size_t tls_size = 0; size_t tls_align = 0; - get_tls(&tls_size, &tls_align); + // On i?86, _dl_get_tls_static_info used to be internal_function, i.e. + // __attribute__((regparm(3), stdcall)) before glibc 2.27 and is normal + // function in 2.27 and later. 
+ if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0)) + CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr, + &tls_size, &tls_align); + else + CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr, + &tls_size, &tls_align); if (tls_align < kStackAlign) tls_align = kStackAlign; g_tls_size = RoundUpTo(tls_size, tls_align); @@ -205,77 +258,59 @@ void InitTlsSize() { } #endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && // !SANITIZER_NETBSD && !SANITIZER_SOLARIS -#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \ - || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) \ - || defined(__arm__)) && SANITIZER_LINUX && !SANITIZER_ANDROID +#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \ + defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \ + defined(__arm__)) && \ + SANITIZER_LINUX && !SANITIZER_ANDROID // sizeof(struct pthread) from glibc. -static atomic_uintptr_t kThreadDescriptorSize; +static atomic_uintptr_t thread_descriptor_size; uptr ThreadDescriptorSize() { - uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed); + uptr val = atomic_load_relaxed(&thread_descriptor_size); if (val) return val; #if defined(__x86_64__) || defined(__i386__) || defined(__arm__) -#ifdef _CS_GNU_LIBC_VERSION - char buf[64]; - uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf)); - if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) { - char *end; - int minor = internal_simple_strtoll(buf + 8, &end, 10); - if (end != buf + 8 && (*end == '\0' || *end == '.' || *end == '-')) { - int patch = 0; - if (*end == '.') - // strtoll will return 0 if no valid conversion could be performed - patch = internal_simple_strtoll(end + 1, nullptr, 10); - - /* sizeof(struct pthread) values from various glibc versions. */ - if (SANITIZER_X32) - val = 1728; // Assume only one particular version for x32. 
- // For ARM sizeof(struct pthread) changed in Glibc 2.23. - else if (SANITIZER_ARM) - val = minor <= 22 ? 1120 : 1216; - else if (minor <= 3) - val = FIRST_32_SECOND_64(1104, 1696); - else if (minor == 4) - val = FIRST_32_SECOND_64(1120, 1728); - else if (minor == 5) - val = FIRST_32_SECOND_64(1136, 1728); - else if (minor <= 9) - val = FIRST_32_SECOND_64(1136, 1712); - else if (minor == 10) - val = FIRST_32_SECOND_64(1168, 1776); - else if (minor == 11 || (minor == 12 && patch == 1)) - val = FIRST_32_SECOND_64(1168, 2288); - else if (minor <= 13) - val = FIRST_32_SECOND_64(1168, 2304); - else - val = FIRST_32_SECOND_64(1216, 2304); - } - if (val) - atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); - return val; + int major; + int minor; + int patch; + if (GetLibcVersion(&major, &minor, &patch) && major == 2) { + /* sizeof(struct pthread) values from various glibc versions. */ + if (SANITIZER_X32) + val = 1728; // Assume only one particular version for x32. + // For ARM sizeof(struct pthread) changed in Glibc 2.23. + else if (SANITIZER_ARM) + val = minor <= 22 ? 1120 : 1216; + else if (minor <= 3) + val = FIRST_32_SECOND_64(1104, 1696); + else if (minor == 4) + val = FIRST_32_SECOND_64(1120, 1728); + else if (minor == 5) + val = FIRST_32_SECOND_64(1136, 1728); + else if (minor <= 9) + val = FIRST_32_SECOND_64(1136, 1712); + else if (minor == 10) + val = FIRST_32_SECOND_64(1168, 1776); + else if (minor == 11 || (minor == 12 && patch == 1)) + val = FIRST_32_SECOND_64(1168, 2288); + else if (minor <= 13) + val = FIRST_32_SECOND_64(1168, 2304); + else + val = FIRST_32_SECOND_64(1216, 2304); } -#endif #elif defined(__mips__) // TODO(sagarthakur): add more values as per different glibc versions. val = FIRST_32_SECOND_64(1152, 1776); - if (val) - atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); - return val; #elif defined(__aarch64__) // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. 
val = 1776; - atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); - return val; #elif defined(__powerpc64__) val = 1776; // from glibc.ppc64le 2.20-8.fc21 - atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); - return val; #elif defined(__s390__) val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22 - atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); #endif - return 0; + if (val) + atomic_store_relaxed(&thread_descriptor_size, val); + return val; } // The offset at which pointer to self is located in the thread descriptor. @@ -432,6 +467,9 @@ static void GetTls(uptr *addr, uptr *size) { *addr = (uptr)tcb->tcb_dtv[1]; } } +#elif SANITIZER_OPENBSD + *addr = 0; + *size = 0; #elif SANITIZER_ANDROID *addr = 0; *size = 0; @@ -447,8 +485,8 @@ static void GetTls(uptr *addr, uptr *size) { #if !SANITIZER_GO uptr GetTlsSize() { -#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_SOLARIS uptr addr, size; GetTls(&addr, &size); return size; @@ -485,13 +523,13 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, #endif } -# if !SANITIZER_FREEBSD +#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD typedef ElfW(Phdr) Elf_Phdr; -# elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2 -# define Elf_Phdr XElf32_Phdr -# define dl_phdr_info xdl_phdr_info -# define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b)) -# endif +#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2 +#define Elf_Phdr XElf32_Phdr +#define dl_phdr_info xdl_phdr_info +#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b)) +#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD struct DlIteratePhdrData { InternalMmapVectorNoCtor<LoadedModule> *modules; @@ -512,7 +550,7 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { return 0; LoadedModule cur_module; 
cur_module.set(module_name.data(), info->dlpi_addr); - for (int i = 0; i < info->dlpi_phnum; i++) { + for (int i = 0; i < (int)info->dlpi_phnum; i++) { const Elf_Phdr *phdr = &info->dlpi_phdr[i]; if (phdr->p_type == PT_LOAD) { uptr cur_beg = info->dlpi_addr + phdr->p_vaddr; @@ -611,7 +649,7 @@ uptr GetRSS() { // sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as // they allocate memory. u32 GetNumberOfCPUs() { -#if SANITIZER_FREEBSD || SANITIZER_NETBSD +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD u32 ncpu; int req[2]; size_t len = sizeof(ncpu); @@ -628,7 +666,7 @@ u32 GetNumberOfCPUs() { uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY); if (internal_iserror(fd)) return 0; - InternalScopedBuffer<u8> buffer(4096); + InternalMmapVector<u8> buffer(4096); uptr bytes_read = buffer.size(); uptr n_cpus = 0; u8 *d_type; @@ -770,4 +808,4 @@ u64 MonotonicNanoTime() { } // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD +#endif diff --git a/lib/sanitizer_common/sanitizer_linux_s390.cc b/lib/sanitizer_common/sanitizer_linux_s390.cc index a6da82ecb1d2..ad8c87f2d6b6 100644 --- a/lib/sanitizer_common/sanitizer_linux_s390.cc +++ b/lib/sanitizer_common/sanitizer_linux_s390.cc @@ -126,7 +126,7 @@ static bool FixedCVE_2016_2143() { // This should never fail, but just in case... if (uname(&buf)) return false; - char *ptr = buf.release; + const char *ptr = buf.release; major = internal_simple_strtoll(ptr, &ptr, 10); // At least first 2 should be matched. 
if (ptr[0] != '.') diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc index e1c51f580874..180d7c199aef 100644 --- a/lib/sanitizer_common/sanitizer_mac.cc +++ b/lib/sanitizer_common/sanitizer_mac.cc @@ -59,7 +59,9 @@ extern "C" { #include <libkern/OSAtomic.h> #include <mach-o/dyld.h> #include <mach/mach.h> +#include <mach/mach_time.h> #include <mach/vm_statistics.h> +#include <malloc/malloc.h> #include <pthread.h> #include <sched.h> #include <signal.h> @@ -100,9 +102,15 @@ extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE; // ---------------------- sanitizer_libc.h + +// From <mach/vm_statistics.h>, but not on older OSs. +#ifndef VM_MEMORY_SANITIZER +#define VM_MEMORY_SANITIZER 99 +#endif + uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd, u64 offset) { - if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL); + if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER); if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset); return (uptr)mmap(addr, length, prot, flags, fd, offset); } @@ -338,10 +346,37 @@ void ReExec() { UNIMPLEMENTED(); } +void CheckASLR() { + // Do nothing +} + uptr GetPageSize() { return sysconf(_SC_PAGESIZE); } +extern "C" unsigned malloc_num_zones; +extern "C" malloc_zone_t **malloc_zones; +malloc_zone_t sanitizer_zone; + +// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If +// libmalloc tries to set up a different zone as malloc_zones[0], it will call +// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and +// make sure we are still the first (default) zone. 
+void MprotectMallocZones(void *addr, int prot) { + if (addr == malloc_zones && prot == PROT_READ) { + if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) { + for (unsigned i = 1; i < malloc_num_zones; i++) { + if (malloc_zones[i] == &sanitizer_zone) { + // Swap malloc_zones[0] and malloc_zones[i]. + malloc_zones[i] = malloc_zones[0]; + malloc_zones[0] = &sanitizer_zone; + break; + } + } + } + } +} + BlockingMutex::BlockingMutex() { internal_memset(this, 0, sizeof(*this)); } @@ -362,11 +397,17 @@ void BlockingMutex::CheckLocked() { } u64 NanoTime() { - return 0; + timeval tv; + internal_memset(&tv, 0, sizeof(tv)); + gettimeofday(&tv, 0); + return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000; } +// This needs to be called during initialization to avoid being racy. u64 MonotonicNanoTime() { - return 0; + static mach_timebase_info_data_t timebase_info; + if (timebase_info.denom == 0) mach_timebase_info(&timebase_info); + return (mach_absolute_time() * timebase_info.numer) / timebase_info.denom; } uptr GetTlsSize() { @@ -428,6 +469,8 @@ static HandleSignalMode GetHandleSignalModeImpl(int signum) { return common_flags()->handle_abort; case SIGILL: return common_flags()->handle_sigill; + case SIGTRAP: + return common_flags()->handle_sigtrap; case SIGFPE: return common_flags()->handle_sigfpe; case SIGSEGV: @@ -673,6 +716,9 @@ bool DyldNeedsEnvVariable() { } void MaybeReexec() { + // FIXME: This should really live in some "InitializePlatform" method. 
+ MonotonicNanoTime(); + if (ReexecDisabled()) return; // Make sure the dynamic runtime library is preloaded so that the @@ -875,10 +921,9 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); } -uptr FindAvailableMemoryRange(uptr shadow_size, - uptr alignment, - uptr left_padding, - uptr *largest_gap_found) { +uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, + uptr *largest_gap_found, + uptr *max_occupied_addr) { typedef vm_region_submap_short_info_data_64_t RegionInfo; enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 }; // Start searching for available memory region past PAGEZERO, which is @@ -890,6 +935,7 @@ uptr FindAvailableMemoryRange(uptr shadow_size, mach_vm_address_t free_begin = start_address; kern_return_t kr = KERN_SUCCESS; if (largest_gap_found) *largest_gap_found = 0; + if (max_occupied_addr) *max_occupied_addr = 0; while (kr == KERN_SUCCESS) { mach_vm_size_t vmsize = 0; natural_t depth = 0; @@ -901,13 +947,15 @@ uptr FindAvailableMemoryRange(uptr shadow_size, // No more regions beyond "address", consider the gap at the end of VM. address = GetMaxVirtualAddress() + 1; vmsize = 0; + } else { + if (max_occupied_addr) *max_occupied_addr = address + vmsize; } if (free_begin != address) { // We found a free region [free_begin..address-1]. uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment); uptr gap_end = RoundDownTo((uptr)address, alignment); uptr gap_size = gap_end > gap_start ? 
gap_end - gap_start : 0; - if (shadow_size < gap_size) { + if (size < gap_size) { return gap_start; } @@ -994,9 +1042,10 @@ void FormatUUID(char *out, uptr size, const u8 *uuid) { void PrintModuleMap() { Printf("Process module map:\n"); MemoryMappingLayout memory_mapping(false); - InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128); + InternalMmapVector<LoadedModule> modules; + modules.reserve(128); memory_mapping.DumpListOfModules(&modules); - InternalSort(&modules, modules.size(), CompareBaseAddress); + Sort(modules.data(), modules.size(), CompareBaseAddress); for (uptr i = 0; i < modules.size(); ++i) { char uuid_str[128]; FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid()); diff --git a/lib/sanitizer_common/sanitizer_malloc_mac.inc b/lib/sanitizer_common/sanitizer_malloc_mac.inc index 5699c59043e9..e69d6f94b0e1 100644 --- a/lib/sanitizer_common/sanitizer_malloc_mac.inc +++ b/lib/sanitizer_common/sanitizer_malloc_mac.inc @@ -29,7 +29,9 @@ // Similar code is used in Google Perftools, // https://github.com/gperftools/gperftools. -static malloc_zone_t sanitizer_zone; +namespace __sanitizer { +extern malloc_zone_t sanitizer_zone; +} INTERCEPTOR(malloc_zone_t *, malloc_create_zone, vm_size_t start_size, unsigned zone_flags) { @@ -65,29 +67,6 @@ INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) { COMMON_MALLOC_FREE(zone); } -extern unsigned malloc_num_zones; -extern malloc_zone_t **malloc_zones; - -// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If -// libmalloc tries to set up a different zone as malloc_zones[0], it will call -// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and -// make sure we are still the first (default) zone. 
-INTERCEPTOR(int, mprotect, void *addr, size_t len, int prot) { - if (addr == malloc_zones && prot == PROT_READ) { - if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) { - for (unsigned i = 1; i < malloc_num_zones; i++) { - if (malloc_zones[i] == &sanitizer_zone) { - // Swap malloc_zones[0] and malloc_zones[i]. - malloc_zones[i] = malloc_zones[0]; - malloc_zones[0] = &sanitizer_zone; - break; - } - } - } - } - return REAL(mprotect)(addr, len, prot); -} - INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) { COMMON_MALLOC_ENTER(); return &sanitizer_zone; @@ -170,12 +149,8 @@ INTERCEPTOR(size_t, malloc_good_size, size_t size) { INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) { COMMON_MALLOC_ENTER(); CHECK(memptr); - COMMON_MALLOC_MEMALIGN(alignment, size); - if (p) { - *memptr = p; - return 0; - } - return -1; + COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size); + return res; } namespace { diff --git a/lib/sanitizer_common/sanitizer_openbsd.cc b/lib/sanitizer_common/sanitizer_openbsd.cc new file mode 100644 index 000000000000..2aea7cb14875 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_openbsd.cc @@ -0,0 +1,101 @@ +//===-- sanitizer_openbsd.cc ----------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries and +// implements Solaris-specific functions. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_OPENBSD + +#include <stdio.h> + +#include "sanitizer_common.h" +#include "sanitizer_flags.h" +#include "sanitizer_internal_defs.h" +#include "sanitizer_libc.h" +#include "sanitizer_placement_new.h" +#include "sanitizer_platform_limits_posix.h" +#include "sanitizer_procmaps.h" + +#include <errno.h> +#include <fcntl.h> +#include <limits.h> +#include <pthread.h> +#include <sched.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/shm.h> +#include <sys/sysctl.h> +#include <sys/types.h> +#include <unistd.h> + +extern char **environ; + +namespace __sanitizer { + +uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd, + u64 offset) { + return (uptr)mmap(addr, length, prot, flags, fd, offset); +} + +uptr internal_munmap(void *addr, uptr length) { return munmap(addr, length); } + +int internal_mprotect(void *addr, uptr length, int prot) { + return mprotect(addr, length, prot); +} + +uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { + // On OpenBSD we cannot get the full path + struct kinfo_proc kp; + size_t kl; + const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()}; + if (sysctl(Mib, ARRAY_SIZE(Mib), &kp, &kl, NULL, 0) != -1) + return internal_snprintf(buf, + (KI_MAXCOMLEN < buf_len ? 
KI_MAXCOMLEN : buf_len), + "%s", kp.p_comm); + return (uptr)0; +} + +static void GetArgsAndEnv(char ***argv, char ***envp) { + size_t nargv; + size_t nenv; + int argvmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ARGV}; + int envmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ENV}; + if (sysctl(argvmib, 4, NULL, &nargv, NULL, 0) == -1) { + Printf("sysctl KERN_PROC_NARGV failed\n"); + Die(); + } + if (sysctl(envmib, 4, NULL, &nenv, NULL, 0) == -1) { + Printf("sysctl KERN_PROC_NENV failed\n"); + Die(); + } + if (sysctl(argvmib, 4, &argv, &nargv, NULL, 0) == -1) { + Printf("sysctl KERN_PROC_ARGV failed\n"); + Die(); + } + if (sysctl(envmib, 4, &envp, &nenv, NULL, 0) == -1) { + Printf("sysctl KERN_PROC_ENV failed\n"); + Die(); + } +} + +char **GetArgv() { + char **argv, **envp; + GetArgsAndEnv(&argv, &envp); + return argv; +} + +void ReExec() { + UNIMPLEMENTED(); +} + +} // namespace __sanitizer + +#endif // SANITIZER_OPENBSD diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h index 334903c26f9f..d81e25580d9e 100644 --- a/lib/sanitizer_common/sanitizer_platform.h +++ b/lib/sanitizer_common/sanitizer_platform.h @@ -14,7 +14,8 @@ #define SANITIZER_PLATFORM_H #if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \ - !defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__) && \ + !defined(__OpenBSD__) && !defined(__APPLE__) && !defined(_WIN32) && \ + !defined(__Fuchsia__) && !defined(__rtems__) && \ !(defined(__sun__) && defined(__svr4__)) # error "This operating system is not supported" #endif @@ -37,6 +38,12 @@ # define SANITIZER_NETBSD 0 #endif +#if defined(__OpenBSD__) +# define SANITIZER_OPENBSD 1 +#else +# define SANITIZER_OPENBSD 0 +#endif + #if defined(__sun__) && defined(__svr4__) # define SANITIZER_SOLARIS 1 #else @@ -98,9 +105,15 @@ # define SANITIZER_FUCHSIA 0 #endif +#if defined(__rtems__) +# define SANITIZER_RTEMS 1 +#else +# define SANITIZER_RTEMS 0 +#endif + 
#define SANITIZER_POSIX \ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \ - SANITIZER_NETBSD || SANITIZER_SOLARIS) + SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_SOLARIS) #if __LP64__ || defined(_WIN64) # define SANITIZER_WORDSIZE 64 @@ -195,6 +208,12 @@ # define SANITIZER_SOLARIS32 0 #endif +#if defined(__myriad2__) +# define SANITIZER_MYRIAD2 1 +#else +# define SANITIZER_MYRIAD2 0 +#endif + // By default we allow to use SizeClassAllocator64 on 64-bit platform. // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64 // does not work well and we need to fallback to SizeClassAllocator32. @@ -296,10 +315,26 @@ # define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0 #endif -#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_SOLARIS # define SANITIZER_MADVISE_DONTNEED MADV_FREE #else # define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED #endif +// Older gcc have issues aligning to a constexpr, and require an integer. +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others. +#if defined(__powerpc__) || defined(__powerpc64__) +# define SANITIZER_CACHE_LINE_SIZE 128 +#else +# define SANITIZER_CACHE_LINE_SIZE 64 +#endif + +// Enable offline markup symbolizer for Fuchsia and RTEMS. +#if SANITIZER_FUCHSIA || SANITIZER_RTEMS +#define SANITIZER_SYMBOLIZER_MARKUP 1 +#else +#define SANITIZER_SYMBOLIZER_MARKUP 0 +#endif + #endif // SANITIZER_PLATFORM_H diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h index b99ac4480586..f95539a73c69 100644 --- a/lib/sanitizer_common/sanitizer_platform_interceptors.h +++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h @@ -28,12 +28,13 @@ # define SI_WINDOWS 1 #endif -#if (SI_POSIX != 0) == (SI_WINDOWS != 0) && !SANITIZER_FUCHSIA +#if SI_WINDOWS && SI_POSIX # error "Windows is not POSIX!" 
#endif #if SI_POSIX # include "sanitizer_platform_limits_netbsd.h" +#include "sanitizer_platform_limits_openbsd.h" # include "sanitizer_platform_limits_posix.h" # include "sanitizer_platform_limits_solaris.h" #endif @@ -62,6 +63,12 @@ # define SI_NETBSD 0 #endif +#if SANITIZER_OPENBSD +#define SI_OPENBSD 1 +#else +#define SI_OPENBSD 0 +#endif + #if SANITIZER_LINUX # define SI_LINUX 1 #else @@ -88,6 +95,12 @@ # define SI_NOT_FUCHSIA 1 #endif +#if SANITIZER_RTEMS +# define SI_NOT_RTEMS 0 +#else +# define SI_NOT_RTEMS 1 +#endif + #if SANITIZER_SOLARIS # define SI_SOLARIS 1 #else @@ -141,7 +154,8 @@ // FIXME: enable memmem on Windows. #define SANITIZER_INTERCEPT_MEMMEM (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_7) #define SANITIZER_INTERCEPT_MEMCHR SI_NOT_FUCHSIA -#define SANITIZER_INTERCEPT_MEMRCHR (SI_FREEBSD || SI_LINUX || SI_NETBSD) +#define SANITIZER_INTERCEPT_MEMRCHR \ + (SI_FREEBSD || SI_LINUX || SI_NETBSD || SI_OPENBSD) #define SANITIZER_INTERCEPT_READ SI_POSIX #define SANITIZER_INTERCEPT_PREAD SI_POSIX @@ -150,6 +164,9 @@ #define SANITIZER_INTERCEPT_FREAD SI_POSIX #define SANITIZER_INTERCEPT_FWRITE SI_POSIX +#define SANITIZER_INTERCEPT_FGETS SI_POSIX +#define SANITIZER_INTERCEPT_FPUTS SI_POSIX +#define SANITIZER_INTERCEPT_PUTS SI_POSIX #define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32 #define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32 @@ -158,7 +175,7 @@ #define SANITIZER_INTERCEPT_WRITEV SI_POSIX #define SANITIZER_INTERCEPT_PREADV \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID @@ -184,24 +201,26 @@ #define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX #define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX -#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \ - (SI_FREEBSD 
|| SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) -#define SANITIZER_INTERCEPT_GETPWENT \ - (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) +#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \ + SI_SOLARIS) +#define SANITIZER_INTERCEPT_GETPWENT \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \ + SI_SOLARIS) #define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_GETPWENT_R \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_SETPWENT \ (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_CLOCK_GETTIME \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX || SI_SOLARIS) #define SANITIZER_INTERCEPT_GETITIMER SI_POSIX #define SANITIZER_INTERCEPT_TIME SI_POSIX #define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_GLOB64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WAIT SI_POSIX #define SANITIZER_INTERCEPT_INET SI_POSIX -#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX +#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM (SI_POSIX && !SI_OPENBSD) #define SANITIZER_INTERCEPT_GETADDRINFO SI_POSIX #define SANITIZER_INTERCEPT_GETNAMEINFO SI_POSIX #define SANITIZER_INTERCEPT_GETSOCKNAME SI_POSIX @@ -217,10 +236,14 @@ (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX #define SANITIZER_INTERCEPT_ACCEPT SI_POSIX -#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_ACCEPT4 \ + (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD) +#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD #define SANITIZER_INTERCEPT_MODF SI_POSIX #define SANITIZER_INTERCEPT_RECVMSG 
SI_POSIX #define SANITIZER_INTERCEPT_SENDMSG SI_POSIX +#define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX +#define SANITIZER_INTERCEPT_SENDMMSG SI_LINUX #define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX #define SANITIZER_INTERCEPT_IOCTL SI_POSIX #define SANITIZER_INTERCEPT_INET_ATON SI_POSIX @@ -243,23 +266,30 @@ #define SANITIZER_INTERCEPT_MBSNRTOWCS \ (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_WCSTOMBS SI_POSIX -#define SANITIZER_INTERCEPT_WCSNRTOMBS \ - (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) -#define SANITIZER_INTERCEPT_WCRTOMB \ - (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) +#define SANITIZER_INTERCEPT_STRXFRM SI_POSIX +#define SANITIZER_INTERCEPT___STRXFRM_L SI_LINUX +#define SANITIZER_INTERCEPT_WCSXFRM SI_POSIX +#define SANITIZER_INTERCEPT___WCSXFRM_L SI_LINUX +#define SANITIZER_INTERCEPT_WCSNRTOMBS \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \ + SI_SOLARIS) +#define SANITIZER_INTERCEPT_WCRTOMB \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \ + SI_SOLARIS) #define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_REALPATH SI_POSIX #define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME \ (SI_LINUX_NOT_ANDROID || SI_SOLARIS) -#define SANITIZER_INTERCEPT_CONFSTR \ - (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) +#define SANITIZER_INTERCEPT_CONFSTR \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \ + SI_SOLARIS) #define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_STRERROR SI_POSIX #define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX #define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SCANDIR \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || 
SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32 #define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX #define SANITIZER_INTERCEPT_POLL SI_POSIX @@ -275,7 +305,7 @@ #define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX #define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX #define SANITIZER_INTERCEPT_BACKTRACE \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX #define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STATFS \ @@ -283,24 +313,26 @@ #define SANITIZER_INTERCEPT_STATFS64 \ ((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_STATVFS \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX -#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX +#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON (SI_POSIX && !SI_OPENBSD) #define SANITIZER_INTERCEPT_ETHER_HOST \ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID) #define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID) -#define SANITIZER_INTERCEPT_SHMCTL \ - (SI_NETBSD || SI_SOLARIS || ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && \ - SANITIZER_WORDSIZE == 64)) // NOLINT +#define SANITIZER_INTERCEPT_SHMCTL \ + (SI_NETBSD || SI_OPENBSD || SI_SOLARIS || \ + ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && \ + SANITIZER_WORDSIZE == 64)) // NOLINT #define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID 
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED (SI_POSIX && !SI_OPENBSD) #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \ - (SI_POSIX && !SI_NETBSD) -#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_POSIX + (SI_POSIX && !SI_NETBSD && !SI_OPENBSD) +#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE (SI_POSIX && !SI_OPENBSD) #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \ (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \ @@ -309,14 +341,15 @@ (SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \ - (SI_POSIX && !SI_NETBSD) + (SI_POSIX && !SI_NETBSD && !SI_OPENBSD) #define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED \ - (SI_POSIX && !SI_NETBSD) + (SI_POSIX && !SI_NETBSD && !SI_OPENBSD) #define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \ (SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \ - (SI_LINUX_NOT_ANDROID && !SI_NETBSD) + (SI_LINUX_NOT_ANDROID && !SI_NETBSD && !SI_OPENBSD) +#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD #define SANITIZER_INTERCEPT_TMPNAM SI_POSIX #define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX @@ -327,35 +360,40 @@ #define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS) #define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_RAND_R \ - (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS) +#define SANITIZER_INTERCEPT_RAND_R \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \ + SI_SOLARIS) #define SANITIZER_INTERCEPT_ICONV \ - (SI_FREEBSD || 
SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_TIMES SI_POSIX // FIXME: getline seems to be available on OSX 10.7 #define SANITIZER_INTERCEPT_GETLINE \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT__EXIT \ - (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS) + (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_SOLARIS) -#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_POSIX +#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX +#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD #define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \ (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_TLS_GET_ADDR \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS) #define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX #define SANITIZER_INTERCEPT_GETXATTR SI_LINUX #define SANITIZER_INTERCEPT_GETRESID SI_LINUX -#define SANITIZER_INTERCEPT_GETIFADDRS \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS) -#define SANITIZER_INTERCEPT_IF_INDEXTONAME \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS) +#define SANITIZER_INTERCEPT_GETIFADDRS \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \ + SI_SOLARIS) +#define SANITIZER_INTERCEPT_IF_INDEXTONAME \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \ + SI_SOLARIS) #define SANITIZER_INTERCEPT_CAPGET 
SI_LINUX_NOT_ANDROID #if SI_LINUX && defined(__arm__) #define SANITIZER_INTERCEPT_AEABI_MEM 1 @@ -363,26 +401,28 @@ #define SANITIZER_INTERCEPT_AEABI_MEM 0 #endif #define SANITIZER_INTERCEPT___BZERO SI_MAC -#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_POSIX) +#define SANITIZER_INTERCEPT_FTIME \ + (!SI_FREEBSD && !SI_NETBSD && !SI_OPENBSD && SI_POSIX) #define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS #define SANITIZER_INTERCEPT_TSEARCH \ - (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_SOLARIS) + (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD || SI_SOLARIS) #define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_FOPEN SI_POSIX #define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32 #define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \ - (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_SOLARIS) + (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS) #define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_FFLUSH SI_POSIX #define SANITIZER_INTERCEPT_FCLOSE SI_POSIX #ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE -#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \ - (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS) +#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \ + (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \ + SI_SOLARIS) #endif #define SANITIZER_INTERCEPT_GETPASS \ - (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD) + (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD) #define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_MLOCKX SI_POSIX @@ -390,19 +430,22 @@ #define SANITIZER_INTERCEPT_SEM \ (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS) #define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX -#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD || SI_SOLARIS) +#define SANITIZER_INTERCEPT_MINCORE \ + (SI_LINUX || SI_NETBSD || SI_OPENBSD || SI_SOLARIS) 
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX #define SANITIZER_INTERCEPT_CTERMID \ - (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_SOLARIS) + (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_SOLARIS) #define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS) -#define SANITIZER_INTERCEPTOR_HOOKS (SI_LINUX || SI_MAC || SI_WINDOWS) +#define SANITIZER_INTERCEPTOR_HOOKS \ + (SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD) #define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX #define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX #define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX #define SANITIZER_INTERCEPT_STAT \ - (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS) + (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS) +#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD) #define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX) #define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT @@ -414,20 +457,58 @@ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD) #define SANITIZER_INTERCEPT_GETLOADAVG \ - (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD) + (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD) +#define SANITIZER_INTERCEPT_MMAP SI_POSIX +#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \ - (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA) -#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD) + (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \ + SI_NOT_RTEMS) +#define SANITIZER_INTERCEPT_MEMALIGN \ + (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS) #define SANITIZER_INTERCEPT_PVALLOC \ - (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA) + (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \ + 
SI_NOT_RTEMS) #define SANITIZER_INTERCEPT_CFREE \ - (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA) -#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC) -#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC) + (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \ + SI_NOT_RTEMS) +#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS) +#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_OPENBSD) #define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WCSCAT SI_POSIX #define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA) #define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID +#define SANITIZER_INTERCEPT_ACCT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD) +#define SANITIZER_INTERCEPT_USER_FROM_UID SI_NETBSD +#define SANITIZER_INTERCEPT_UID_FROM_USER SI_NETBSD +#define SANITIZER_INTERCEPT_GROUP_FROM_GID SI_NETBSD +#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD +#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_OPENBSD || SI_FREEBSD) +#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD) +#define SANITIZER_INTERCEPT_GETGROUPLIST (SI_NETBSD || SI_OPENBSD) +#define SANITIZER_INTERCEPT_STRLCPY \ + (SI_NETBSD || SI_FREEBSD || SI_OPENBSD || SI_MAC || SI_ANDROID) + +#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT SI_LINUX_NOT_ANDROID + +#define SANITIZER_INTERCEPT_READLINK SI_POSIX +#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000 +# define SI_MAC_DEPLOYMENT_BELOW_10_10 1 +#else +# define SI_MAC_DEPLOYMENT_BELOW_10_10 0 +#endif +#define SANITIZER_INTERCEPT_READLINKAT \ + (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_10) + +#define SANITIZER_INTERCEPT_DEVNAME (SI_NETBSD || SI_OPENBSD || SI_FREEBSD) +#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD) +#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD 
|| SI_FREEBSD) +#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD) +#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD +#define SANITIZER_INTERCEPT_PROTOENT SI_NETBSD +#define SANITIZER_INTERCEPT_NETENT SI_NETBSD + #endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H diff --git a/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc b/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc index 108e196e7a05..3c6bb2090fa5 100644 --- a/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc +++ b/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc @@ -15,7 +15,53 @@ #include "sanitizer_platform.h" #if SANITIZER_NETBSD +#include <sys/param.h> +#include <sys/types.h> + +#include <altq/altq.h> +#include <altq/altq_afmap.h> +#include <altq/altq_blue.h> +#include <altq/altq_cbq.h> +#include <altq/altq_cdnr.h> +#include <altq/altq_fifoq.h> +#include <altq/altq_hfsc.h> +#include <altq/altq_jobs.h> +#include <altq/altq_priq.h> +#include <altq/altq_red.h> +#include <altq/altq_rio.h> +#include <altq/altq_wfq.h> #include <arpa/inet.h> +#include <crypto/cryptodev.h> +#include <dev/apm/apmio.h> +#include <dev/dm/netbsd-dm.h> +#include <dev/dmover/dmover_io.h> +#include <dev/dtv/dtvio_demux.h> +#include <dev/dtv/dtvio_frontend.h> +#include <dev/filemon/filemon.h> +#include <dev/hdaudio/hdaudioio.h> +#include <dev/hdmicec/hdmicecio.h> +#include <dev/hpc/hpcfbio.h> +#include <dev/i2o/iopio.h> +#include <dev/ic/athioctl.h> +#include <dev/ic/bt8xx.h> +#include <dev/ic/icp_ioctl.h> +#include <dev/ic/isp_ioctl.h> +#include <dev/ic/mlxio.h> +#include <dev/ic/nvmeio.h> +#include <dev/ir/irdaio.h> +#include <dev/isa/isvio.h> +#include <dev/isa/satlinkio.h> +#include <dev/isa/wtreg.h> +#include <dev/iscsi/iscsi_ioctl.h> +#include <dev/ofw/openfirmio.h> +#include <dev/pci/amrio.h> + +#include <dev/pci/mlyreg.h> +#include <dev/pci/mlyio.h> + +#include <dev/pci/pciio.h> +#include <dev/pci/tweio.h> +#include <dev/pcmcia/if_cnwioctl.h> #include <dirent.h> #include <glob.h> 
#include <grp.h> @@ -28,6 +74,8 @@ #include <net/route.h> #include <netdb.h> #include <netinet/in.h> +#include <netinet/ip_compat.h> +#include <netinet/ip_fil.h> #include <netinet/ip_mroute.h> #include <poll.h> #include <pthread.h> @@ -35,6 +83,93 @@ #include <semaphore.h> #include <signal.h> #include <stddef.h> +#include <stdio.h> +#include <sys/disk.h> +#include <sys/disklabel.h> +#include <sys/mount.h> +#define RAY_DO_SIGLEV +#include <dev/biovar.h> +#include <dev/bluetooth/btdev.h> +#include <dev/bluetooth/btsco.h> +#include <dev/ccdvar.h> +#include <dev/cgdvar.h> +#include <dev/fssvar.h> +#include <dev/kttcpio.h> +#include <dev/lockstat.h> +#include <dev/md.h> +#include <dev/pcmcia/if_rayreg.h> +#include <dev/raidframe/raidframeio.h> +#include <dev/sbus/mbppio.h> +#include <dev/scsipi/ses.h> +#include <dev/spkrio.h> +#include <dev/sun/disklabel.h> +#include <dev/sun/fbio.h> +#include <dev/sun/kbio.h> +#include <dev/sun/vuid_event.h> +#include <dev/tc/sticio.h> +#include <dev/usb/ukyopon.h> +#include <dev/usb/urio.h> +#include <dev/usb/usb.h> +#include <dev/usb/utoppy.h> +#include <dev/vme/xio.h> +#include <dev/vndvar.h> +#include <dev/wscons/wsconsio.h> +#include <dev/wscons/wsdisplay_usl_io.h> +#include <net/bpf.h> +#include <net/if_atm.h> +#include <net/if_gre.h> +#include <net/if_ppp.h> +#include <net/if_pppoe.h> +#include <net/if_sppp.h> +#include <net/if_srt.h> +#include <net/if_tap.h> +#include <net/if_tun.h> +#include <net/npf.h> +#include <net/pfvar.h> +#include <net/slip.h> +#include <netbt/hci.h> +#include <netinet/ip_nat.h> +#include <netinet/ip_proxy.h> +#include <netinet6/in6_var.h> +#include <netinet6/nd6.h> +#include <netnatm/natm.h> +#include <netsmb/smb_dev.h> +#include <soundcard.h> +#include <sys/agpio.h> +#include <sys/ataio.h> +#include <sys/audioio.h> +#include <sys/cdio.h> +#include <sys/chio.h> +#include <sys/clockctl.h> +#include <sys/cpuio.h> +#include <sys/dkio.h> +#include <sys/drvctlio.h> +#include <sys/dvdio.h> +#include 
<sys/envsys.h> +#include <sys/event.h> +#include <sys/fdio.h> +#include <sys/filio.h> +#include <sys/gpio.h> +#include <sys/ioctl.h> +#include <sys/ioctl_compat.h> +#include <sys/joystick.h> +#include <sys/ksyms.h> +#include <sys/lua.h> +#include <sys/midiio.h> +#include <sys/mtio.h> +#include <sys/power.h> +#include <sys/radioio.h> +#include <sys/rndio.h> +#include <sys/scanio.h> +#include <sys/scsiio.h> +#include <sys/sockio.h> +#include <sys/timepps.h> +#include <sys/ttycom.h> +#include <sys/verified_exec.h> +#include <sys/videoio.h> +#include <sys/wdog.h> +//#include <xen/xenio.h> +#include <sys/event.h> #include <sys/filio.h> #include <sys/ipc.h> #include <sys/mman.h> @@ -44,6 +179,7 @@ #include <sys/mtio.h> #include <sys/ptrace.h> #include <sys/resource.h> +#include <sys/sem.h> #include <sys/shm.h> #include <sys/signal.h> #include <sys/socket.h> @@ -62,6 +198,7 @@ #include <term.h> #include <termios.h> #include <time.h> +#include <ttyent.h> #include <utime.h> #include <utmp.h> #include <utmpx.h> @@ -99,18 +236,81 @@ unsigned struct_sockaddr_sz = sizeof(struct sockaddr); unsigned ucontext_t_sz = sizeof(ucontext_t); unsigned struct_rlimit_sz = sizeof(struct rlimit); unsigned struct_timespec_sz = sizeof(struct timespec); +unsigned struct_sembuf_sz = sizeof(struct sembuf); +unsigned struct_kevent_sz = sizeof(struct kevent); unsigned struct_utimbuf_sz = sizeof(struct utimbuf); unsigned struct_itimerspec_sz = sizeof(struct itimerspec); unsigned struct_timex_sz = sizeof(struct timex); unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); unsigned struct_mq_attr_sz = sizeof(struct mq_attr); unsigned struct_statvfs_sz = sizeof(struct statvfs); +unsigned struct_sigaltstack_sz = sizeof(stack_t); const uptr sig_ign = (uptr)SIG_IGN; const uptr sig_dfl = (uptr)SIG_DFL; const uptr sig_err = (uptr)SIG_ERR; const uptr sa_siginfo = (uptr)SA_SIGINFO; +int ptrace_pt_io = PT_IO; +int ptrace_pt_lwpinfo = PT_LWPINFO; +int ptrace_pt_set_event_mask = PT_SET_EVENT_MASK; +int 
ptrace_pt_get_event_mask = PT_GET_EVENT_MASK; +int ptrace_pt_get_process_state = PT_GET_PROCESS_STATE; +int ptrace_pt_set_siginfo = PT_SET_SIGINFO; +int ptrace_pt_get_siginfo = PT_GET_SIGINFO; +int ptrace_piod_read_d = PIOD_READ_D; +int ptrace_piod_write_d = PIOD_WRITE_D; +int ptrace_piod_read_i = PIOD_READ_I; +int ptrace_piod_write_i = PIOD_WRITE_I; +int ptrace_piod_read_auxv = PIOD_READ_AUXV; + +#if defined(PT_SETREGS) && defined(PT_GETREGS) +int ptrace_pt_setregs = PT_SETREGS; +int ptrace_pt_getregs = PT_GETREGS; +#else +int ptrace_pt_setregs = -1; +int ptrace_pt_getregs = -1; +#endif + +#if defined(PT_SETFPREGS) && defined(PT_GETFPREGS) +int ptrace_pt_setfpregs = PT_SETFPREGS; +int ptrace_pt_getfpregs = PT_GETFPREGS; +#else +int ptrace_pt_setfpregs = -1; +int ptrace_pt_getfpregs = -1; +#endif + +#if defined(PT_SETDBREGS) && defined(PT_GETDBREGS) +int ptrace_pt_setdbregs = PT_SETDBREGS; +int ptrace_pt_getdbregs = PT_GETDBREGS; +#else +int ptrace_pt_setdbregs = -1; +int ptrace_pt_getdbregs = -1; +#endif + +unsigned struct_ptrace_ptrace_io_desc_struct_sz = sizeof(struct ptrace_io_desc); +unsigned struct_ptrace_ptrace_lwpinfo_struct_sz = sizeof(struct ptrace_lwpinfo); +unsigned struct_ptrace_ptrace_event_struct_sz = sizeof(ptrace_event_t); +unsigned struct_ptrace_ptrace_siginfo_struct_sz = sizeof(ptrace_siginfo_t); + +#if defined(PT_SETREGS) +unsigned struct_ptrace_reg_struct_sz = sizeof(struct reg); +#else +unsigned struct_ptrace_reg_struct_sz = -1; +#endif + +#if defined(PT_SETFPREGS) +unsigned struct_ptrace_fpreg_struct_sz = sizeof(struct fpreg); +#else +unsigned struct_ptrace_fpreg_struct_sz = -1; +#endif + +#if defined(PT_SETDBREGS) +unsigned struct_ptrace_dbreg_struct_sz = sizeof(struct dbreg); +#else +unsigned struct_ptrace_dbreg_struct_sz = -1; +#endif + int shmctl_ipc_stat = (int)IPC_STAT; unsigned struct_utmp_sz = sizeof(struct utmp); @@ -137,65 +337,1729 @@ int glob_altdirfunc = GLOB_ALTDIRFUNC; unsigned path_max = PATH_MAX; +int struct_ttyent_sz = 
sizeof(struct ttyent); + // ioctl arguments -unsigned struct_ifreq_sz = sizeof(struct ifreq); -unsigned struct_termios_sz = sizeof(struct termios); -unsigned struct_winsize_sz = sizeof(struct winsize); -unsigned struct_mtget_sz = sizeof(struct mtget); -unsigned struct_mtop_sz = sizeof(struct mtop); -unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info); -unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats); -unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); -unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req); +unsigned struct_altqreq_sz = sizeof(altqreq); +unsigned struct_amr_user_ioctl_sz = sizeof(amr_user_ioctl); +unsigned struct_ap_control_sz = sizeof(ap_control); +unsigned struct_apm_ctl_sz = sizeof(apm_ctl); +unsigned struct_apm_event_info_sz = sizeof(apm_event_info); +unsigned struct_apm_power_info_sz = sizeof(apm_power_info); +unsigned struct_atabusiodetach_args_sz = sizeof(atabusiodetach_args); +unsigned struct_atabusioscan_args_sz = sizeof(atabusioscan_args); +unsigned struct_ath_diag_sz = sizeof(ath_diag); +unsigned struct_atm_flowmap_sz = sizeof(atm_flowmap); +unsigned struct_atm_pseudoioctl_sz = sizeof(atm_pseudoioctl); +unsigned struct_audio_buf_info_sz = sizeof(audio_buf_info); +unsigned struct_audio_device_sz = sizeof(audio_device); +unsigned struct_audio_encoding_sz = sizeof(audio_encoding); +unsigned struct_audio_info_sz = sizeof(audio_info); +unsigned struct_audio_offset_sz = sizeof(audio_offset); +unsigned struct_bio_locate_sz = sizeof(bio_locate); +unsigned struct_bioc_alarm_sz = sizeof(bioc_alarm); +unsigned struct_bioc_blink_sz = sizeof(bioc_blink); +unsigned struct_bioc_disk_sz = sizeof(bioc_disk); +unsigned struct_bioc_inq_sz = sizeof(bioc_inq); +unsigned struct_bioc_setstate_sz = sizeof(bioc_setstate); +unsigned struct_bioc_vol_sz = sizeof(bioc_vol); +unsigned struct_bioc_volops_sz = sizeof(bioc_volops); +unsigned struct_bktr_chnlset_sz = sizeof(bktr_chnlset); +unsigned struct_bktr_remote_sz = 
sizeof(bktr_remote); +unsigned struct_blue_conf_sz = sizeof(blue_conf); +unsigned struct_blue_interface_sz = sizeof(blue_interface); +unsigned struct_blue_stats_sz = sizeof(blue_stats); +unsigned struct_bpf_dltlist_sz = sizeof(bpf_dltlist); +unsigned struct_bpf_program_sz = sizeof(bpf_program); +unsigned struct_bpf_stat_old_sz = sizeof(bpf_stat_old); +unsigned struct_bpf_stat_sz = sizeof(bpf_stat); +unsigned struct_bpf_version_sz = sizeof(bpf_version); +unsigned struct_btreq_sz = sizeof(btreq); +unsigned struct_btsco_info_sz = sizeof(btsco_info); +unsigned struct_buffmem_desc_sz = sizeof(buffmem_desc); +unsigned struct_cbq_add_class_sz = sizeof(cbq_add_class); +unsigned struct_cbq_add_filter_sz = sizeof(cbq_add_filter); +unsigned struct_cbq_delete_class_sz = sizeof(cbq_delete_class); +unsigned struct_cbq_delete_filter_sz = sizeof(cbq_delete_filter); +unsigned struct_cbq_getstats_sz = sizeof(cbq_getstats); +unsigned struct_cbq_interface_sz = sizeof(cbq_interface); +unsigned struct_cbq_modify_class_sz = sizeof(cbq_modify_class); +unsigned struct_ccd_ioctl_sz = sizeof(ccd_ioctl); +unsigned struct_cdnr_add_element_sz = sizeof(cdnr_add_element); +unsigned struct_cdnr_add_filter_sz = sizeof(cdnr_add_filter); +unsigned struct_cdnr_add_tbmeter_sz = sizeof(cdnr_add_tbmeter); +unsigned struct_cdnr_add_trtcm_sz = sizeof(cdnr_add_trtcm); +unsigned struct_cdnr_add_tswtcm_sz = sizeof(cdnr_add_tswtcm); +unsigned struct_cdnr_delete_element_sz = sizeof(cdnr_delete_element); +unsigned struct_cdnr_delete_filter_sz = sizeof(cdnr_delete_filter); +unsigned struct_cdnr_get_stats_sz = sizeof(cdnr_get_stats); +unsigned struct_cdnr_interface_sz = sizeof(cdnr_interface); +unsigned struct_cdnr_modify_tbmeter_sz = sizeof(cdnr_modify_tbmeter); +unsigned struct_cdnr_modify_trtcm_sz = sizeof(cdnr_modify_trtcm); +unsigned struct_cdnr_modify_tswtcm_sz = sizeof(cdnr_modify_tswtcm); +unsigned struct_cdnr_tbmeter_stats_sz = sizeof(cdnr_tbmeter_stats); +unsigned struct_cdnr_tcm_stats_sz = 
sizeof(cdnr_tcm_stats); +unsigned struct_cgd_ioctl_sz = sizeof(cgd_ioctl); +unsigned struct_cgd_user_sz = sizeof(cgd_user); +unsigned struct_changer_element_status_request_sz = + sizeof(changer_element_status_request); +unsigned struct_changer_exchange_request_sz = sizeof(changer_exchange_request); +unsigned struct_changer_move_request_sz = sizeof(changer_move_request); +unsigned struct_changer_params_sz = sizeof(changer_params); +unsigned struct_changer_position_request_sz = sizeof(changer_position_request); +unsigned struct_changer_set_voltag_request_sz = + sizeof(changer_set_voltag_request); +unsigned struct_clockctl_adjtime_sz = sizeof(clockctl_adjtime); +unsigned struct_clockctl_clock_settime_sz = sizeof(clockctl_clock_settime); +unsigned struct_clockctl_ntp_adjtime_sz = sizeof(clockctl_ntp_adjtime); +unsigned struct_clockctl_settimeofday_sz = sizeof(clockctl_settimeofday); +unsigned struct_cnwistats_sz = sizeof(cnwistats); +unsigned struct_cnwitrail_sz = sizeof(cnwitrail); +unsigned struct_cnwstatus_sz = sizeof(cnwstatus); +unsigned struct_count_info_sz = sizeof(count_info); +unsigned struct_cpu_ucode_sz = sizeof(cpu_ucode); +unsigned struct_cpu_ucode_version_sz = sizeof(cpu_ucode_version); +unsigned struct_crypt_kop_sz = sizeof(crypt_kop); +unsigned struct_crypt_mkop_sz = sizeof(crypt_mkop); +unsigned struct_crypt_mop_sz = sizeof(crypt_mop); +unsigned struct_crypt_op_sz = sizeof(crypt_op); +unsigned struct_crypt_result_sz = sizeof(crypt_result); +unsigned struct_crypt_sfop_sz = sizeof(crypt_sfop); +unsigned struct_crypt_sgop_sz = sizeof(crypt_sgop); +unsigned struct_cryptret_sz = sizeof(cryptret); +unsigned struct_devdetachargs_sz = sizeof(devdetachargs); +unsigned struct_devlistargs_sz = sizeof(devlistargs); +unsigned struct_devpmargs_sz = sizeof(devpmargs); +unsigned struct_devrescanargs_sz = sizeof(devrescanargs); +unsigned struct_disk_badsecinfo_sz = sizeof(disk_badsecinfo); +unsigned struct_disk_strategy_sz = sizeof(disk_strategy); +unsigned 
struct_disklabel_sz = sizeof(disklabel); +unsigned struct_dkbad_sz = sizeof(dkbad); +unsigned struct_dkwedge_info_sz = sizeof(dkwedge_info); +unsigned struct_dkwedge_list_sz = sizeof(dkwedge_list); +unsigned struct_dmio_setfunc_sz = sizeof(dmio_setfunc); +unsigned struct_dmx_pes_filter_params_sz = sizeof(dmx_pes_filter_params); +unsigned struct_dmx_sct_filter_params_sz = sizeof(dmx_sct_filter_params); +unsigned struct_dmx_stc_sz = sizeof(dmx_stc); +unsigned struct_dvb_diseqc_master_cmd_sz = sizeof(dvb_diseqc_master_cmd); +unsigned struct_dvb_diseqc_slave_reply_sz = sizeof(dvb_diseqc_slave_reply); +unsigned struct_dvb_frontend_event_sz = sizeof(dvb_frontend_event); +unsigned struct_dvb_frontend_info_sz = sizeof(dvb_frontend_info); +unsigned struct_dvb_frontend_parameters_sz = sizeof(dvb_frontend_parameters); +unsigned struct_eccapreq_sz = sizeof(eccapreq); +unsigned struct_fbcmap_sz = sizeof(fbcmap); +unsigned struct_fbcurpos_sz = sizeof(fbcurpos); +unsigned struct_fbcursor_sz = sizeof(fbcursor); +unsigned struct_fbgattr_sz = sizeof(fbgattr); +unsigned struct_fbsattr_sz = sizeof(fbsattr); +unsigned struct_fbtype_sz = sizeof(fbtype); +unsigned struct_fdformat_cmd_sz = sizeof(fdformat_cmd); +unsigned struct_fdformat_parms_sz = sizeof(fdformat_parms); +unsigned struct_fifoq_conf_sz = sizeof(fifoq_conf); +unsigned struct_fifoq_getstats_sz = sizeof(fifoq_getstats); +unsigned struct_fifoq_interface_sz = sizeof(fifoq_interface); +unsigned struct_format_op_sz = sizeof(format_op); +unsigned struct_fss_get_sz = sizeof(fss_get); +unsigned struct_fss_set_sz = sizeof(fss_set); +unsigned struct_gpio_attach_sz = sizeof(gpio_attach); +unsigned struct_gpio_info_sz = sizeof(gpio_info); +unsigned struct_gpio_req_sz = sizeof(gpio_req); +unsigned struct_gpio_set_sz = sizeof(gpio_set); +unsigned struct_hfsc_add_class_sz = sizeof(hfsc_add_class); +unsigned struct_hfsc_add_filter_sz = sizeof(hfsc_add_filter); +unsigned struct_hfsc_attach_sz = sizeof(hfsc_attach); +unsigned 
struct_hfsc_class_stats_sz = sizeof(hfsc_class_stats); +unsigned struct_hfsc_delete_class_sz = sizeof(hfsc_delete_class); +unsigned struct_hfsc_delete_filter_sz = sizeof(hfsc_delete_filter); +unsigned struct_hfsc_interface_sz = sizeof(hfsc_interface); +unsigned struct_hfsc_modify_class_sz = sizeof(hfsc_modify_class); +unsigned struct_hpcfb_dsp_op_sz = sizeof(hpcfb_dsp_op); +unsigned struct_hpcfb_dspconf_sz = sizeof(hpcfb_dspconf); +unsigned struct_hpcfb_fbconf_sz = sizeof(hpcfb_fbconf); +unsigned struct_if_addrprefreq_sz = sizeof(if_addrprefreq); +unsigned struct_if_clonereq_sz = sizeof(if_clonereq); +unsigned struct_if_laddrreq_sz = sizeof(if_laddrreq); +unsigned struct_ifaddr_sz = sizeof(ifaddr); +unsigned struct_ifaliasreq_sz = sizeof(ifaliasreq); +unsigned struct_ifcapreq_sz = sizeof(ifcapreq); +unsigned struct_ifconf_sz = sizeof(ifconf); +unsigned struct_ifdatareq_sz = sizeof(ifdatareq); +unsigned struct_ifdrv_sz = sizeof(ifdrv); +unsigned struct_ifmediareq_sz = sizeof(ifmediareq); +unsigned struct_ifpppcstatsreq_sz = sizeof(ifpppcstatsreq); +unsigned struct_ifpppstatsreq_sz = sizeof(ifpppstatsreq); +unsigned struct_ifreq_sz = sizeof(ifreq); +unsigned struct_in6_addrpolicy_sz = sizeof(in6_addrpolicy); +unsigned struct_in6_ndireq_sz = sizeof(in6_ndireq); +unsigned struct_ioc_load_unload_sz = sizeof(ioc_load_unload); +unsigned struct_ioc_patch_sz = sizeof(ioc_patch); +unsigned struct_ioc_play_blocks_sz = sizeof(ioc_play_blocks); +unsigned struct_ioc_play_msf_sz = sizeof(ioc_play_msf); +unsigned struct_ioc_play_track_sz = sizeof(ioc_play_track); +unsigned struct_ioc_read_subchannel_sz = sizeof(ioc_read_subchannel); +unsigned struct_ioc_read_toc_entry_sz = sizeof(ioc_read_toc_entry); +unsigned struct_ioc_toc_header_sz = sizeof(ioc_toc_header); +unsigned struct_ioc_vol_sz = sizeof(ioc_vol); +unsigned struct_ioctl_pt_sz = sizeof(ioctl_pt); +unsigned struct_ioppt_sz = sizeof(ioppt); +unsigned struct_iovec_sz = sizeof(iovec); +unsigned struct_ipfobj_sz = 
sizeof(ipfobj); +unsigned struct_irda_params_sz = sizeof(irda_params); +unsigned struct_isp_fc_device_sz = sizeof(isp_fc_device); +unsigned struct_isp_fc_tsk_mgmt_sz = sizeof(isp_fc_tsk_mgmt); +unsigned struct_isp_hba_device_sz = sizeof(isp_hba_device); +unsigned struct_isv_cmd_sz = sizeof(isv_cmd); +unsigned struct_jobs_add_class_sz = sizeof(jobs_add_class); +unsigned struct_jobs_add_filter_sz = sizeof(jobs_add_filter); +unsigned struct_jobs_attach_sz = sizeof(jobs_attach); +unsigned struct_jobs_class_stats_sz = sizeof(jobs_class_stats); +unsigned struct_jobs_delete_class_sz = sizeof(jobs_delete_class); +unsigned struct_jobs_delete_filter_sz = sizeof(jobs_delete_filter); +unsigned struct_jobs_interface_sz = sizeof(jobs_interface); +unsigned struct_jobs_modify_class_sz = sizeof(jobs_modify_class); +unsigned struct_kbentry_sz = sizeof(kbentry); +unsigned struct_kfilter_mapping_sz = sizeof(kfilter_mapping); +unsigned struct_kiockeymap_sz = sizeof(kiockeymap); +unsigned struct_ksyms_gsymbol_sz = sizeof(ksyms_gsymbol); +unsigned struct_ksyms_gvalue_sz = sizeof(ksyms_gvalue); +unsigned struct_ksyms_ogsymbol_sz = sizeof(ksyms_ogsymbol); +unsigned struct_kttcp_io_args_sz = sizeof(kttcp_io_args); +unsigned struct_ltchars_sz = sizeof(ltchars); +unsigned struct_lua_create_sz = sizeof(struct lua_create); +unsigned struct_lua_info_sz = sizeof(struct lua_info); +unsigned struct_lua_load_sz = sizeof(struct lua_load); +unsigned struct_lua_require_sz = sizeof(lua_require); +unsigned struct_mbpp_param_sz = sizeof(mbpp_param); +unsigned struct_md_conf_sz = sizeof(md_conf); +unsigned struct_meteor_capframe_sz = sizeof(meteor_capframe); +unsigned struct_meteor_counts_sz = sizeof(meteor_counts); +unsigned struct_meteor_geomet_sz = sizeof(meteor_geomet); +unsigned struct_meteor_pixfmt_sz = sizeof(meteor_pixfmt); +unsigned struct_meteor_video_sz = sizeof(meteor_video); +unsigned struct_mlx_cinfo_sz = sizeof(mlx_cinfo); +unsigned struct_mlx_pause_sz = sizeof(mlx_pause); +unsigned 
struct_mlx_rebuild_request_sz = sizeof(mlx_rebuild_request); +unsigned struct_mlx_rebuild_status_sz = sizeof(mlx_rebuild_status); +unsigned struct_mlx_usercommand_sz = sizeof(mlx_usercommand); +unsigned struct_mly_user_command_sz = sizeof(mly_user_command); +unsigned struct_mly_user_health_sz = sizeof(mly_user_health); +unsigned struct_mtget_sz = sizeof(mtget); +unsigned struct_mtop_sz = sizeof(mtop); +unsigned struct_npf_ioctl_table_sz = sizeof(npf_ioctl_table); +unsigned struct_npioctl_sz = sizeof(npioctl); +unsigned struct_nvme_pt_command_sz = sizeof(nvme_pt_command); +unsigned struct_ochanger_element_status_request_sz = + sizeof(ochanger_element_status_request); +unsigned struct_ofiocdesc_sz = sizeof(ofiocdesc); +unsigned struct_okiockey_sz = sizeof(okiockey); +unsigned struct_ortentry_sz = sizeof(ortentry); +unsigned struct_oscsi_addr_sz = sizeof(oscsi_addr); +unsigned struct_oss_audioinfo_sz = sizeof(oss_audioinfo); +unsigned struct_oss_sysinfo_sz = sizeof(oss_sysinfo); +unsigned struct_pciio_bdf_cfgreg_sz = sizeof(pciio_bdf_cfgreg); +unsigned struct_pciio_businfo_sz = sizeof(pciio_businfo); +unsigned struct_pciio_cfgreg_sz = sizeof(pciio_cfgreg); +unsigned struct_pciio_drvname_sz = sizeof(pciio_drvname); +unsigned struct_pciio_drvnameonbus_sz = sizeof(pciio_drvnameonbus); +unsigned struct_pcvtid_sz = sizeof(pcvtid); +unsigned struct_pf_osfp_ioctl_sz = sizeof(pf_osfp_ioctl); +unsigned struct_pf_status_sz = sizeof(pf_status); +unsigned struct_pfioc_altq_sz = sizeof(pfioc_altq); +unsigned struct_pfioc_if_sz = sizeof(pfioc_if); +unsigned struct_pfioc_iface_sz = sizeof(pfioc_iface); +unsigned struct_pfioc_limit_sz = sizeof(pfioc_limit); +unsigned struct_pfioc_natlook_sz = sizeof(pfioc_natlook); +unsigned struct_pfioc_pooladdr_sz = sizeof(pfioc_pooladdr); +unsigned struct_pfioc_qstats_sz = sizeof(pfioc_qstats); +unsigned struct_pfioc_rule_sz = sizeof(pfioc_rule); +unsigned struct_pfioc_ruleset_sz = sizeof(pfioc_ruleset); +unsigned struct_pfioc_src_node_kill_sz = 
sizeof(pfioc_src_node_kill); +unsigned struct_pfioc_src_nodes_sz = sizeof(pfioc_src_nodes); +unsigned struct_pfioc_state_kill_sz = sizeof(pfioc_state_kill); +unsigned struct_pfioc_state_sz = sizeof(pfioc_state); +unsigned struct_pfioc_states_sz = sizeof(pfioc_states); +unsigned struct_pfioc_table_sz = sizeof(pfioc_table); +unsigned struct_pfioc_tm_sz = sizeof(pfioc_tm); +unsigned struct_pfioc_trans_sz = sizeof(pfioc_trans); +unsigned struct_plistref_sz = sizeof(plistref); +unsigned struct_power_type_sz = sizeof(power_type); +unsigned struct_ppp_idle_sz = sizeof(ppp_idle); +unsigned struct_ppp_option_data_sz = sizeof(ppp_option_data); +unsigned struct_ppp_rawin_sz = sizeof(ppp_rawin); +unsigned struct_pppoeconnectionstate_sz = sizeof(pppoeconnectionstate); +unsigned struct_pppoediscparms_sz = sizeof(pppoediscparms); +unsigned struct_priq_add_class_sz = sizeof(priq_add_class); +unsigned struct_priq_add_filter_sz = sizeof(priq_add_filter); +unsigned struct_priq_class_stats_sz = sizeof(priq_class_stats); +unsigned struct_priq_delete_class_sz = sizeof(priq_delete_class); +unsigned struct_priq_delete_filter_sz = sizeof(priq_delete_filter); +unsigned struct_priq_interface_sz = sizeof(priq_interface); +unsigned struct_priq_modify_class_sz = sizeof(priq_modify_class); +unsigned struct_ptmget_sz = sizeof(ptmget); +unsigned struct_pvctxreq_sz = sizeof(pvctxreq); +unsigned struct_radio_info_sz = sizeof(radio_info); +unsigned struct_red_conf_sz = sizeof(red_conf); +unsigned struct_red_interface_sz = sizeof(red_interface); +unsigned struct_red_stats_sz = sizeof(red_stats); +unsigned struct_redparams_sz = sizeof(redparams); +unsigned struct_rf_pmparams_sz = sizeof(rf_pmparams); +unsigned struct_rf_pmstat_sz = sizeof(rf_pmstat); +unsigned struct_rf_recon_req_sz = sizeof(rf_recon_req); +unsigned struct_rio_conf_sz = sizeof(rio_conf); +unsigned struct_rio_interface_sz = sizeof(rio_interface); +unsigned struct_rio_stats_sz = sizeof(rio_stats); +unsigned struct_satlink_id_sz = 
sizeof(satlink_id); +unsigned struct_scan_io_sz = sizeof(scan_io); +unsigned struct_scbusaccel_args_sz = sizeof(scbusaccel_args); +unsigned struct_scbusiodetach_args_sz = sizeof(scbusiodetach_args); +unsigned struct_scbusioscan_args_sz = sizeof(scbusioscan_args); +unsigned struct_scsi_addr_sz = sizeof(scsi_addr); +unsigned struct_seq_event_rec_sz = sizeof(seq_event_rec); +unsigned struct_session_op_sz = sizeof(session_op); +unsigned struct_sgttyb_sz = sizeof(sgttyb); +unsigned struct_sioc_sg_req_sz = sizeof(sioc_sg_req); +unsigned struct_sioc_vif_req_sz = sizeof(sioc_vif_req); +unsigned struct_smbioc_flags_sz = sizeof(smbioc_flags); +unsigned struct_smbioc_lookup_sz = sizeof(smbioc_lookup); +unsigned struct_smbioc_oshare_sz = sizeof(smbioc_oshare); +unsigned struct_smbioc_ossn_sz = sizeof(smbioc_ossn); +unsigned struct_smbioc_rq_sz = sizeof(smbioc_rq); +unsigned struct_smbioc_rw_sz = sizeof(smbioc_rw); +unsigned struct_spppauthcfg_sz = sizeof(spppauthcfg); +unsigned struct_spppauthfailuresettings_sz = sizeof(spppauthfailuresettings); +unsigned struct_spppauthfailurestats_sz = sizeof(spppauthfailurestats); +unsigned struct_spppdnsaddrs_sz = sizeof(spppdnsaddrs); +unsigned struct_spppdnssettings_sz = sizeof(spppdnssettings); +unsigned struct_spppidletimeout_sz = sizeof(spppidletimeout); +unsigned struct_spppkeepalivesettings_sz = sizeof(spppkeepalivesettings); +unsigned struct_sppplcpcfg_sz = sizeof(sppplcpcfg); +unsigned struct_spppstatus_sz = sizeof(spppstatus); +unsigned struct_spppstatusncp_sz = sizeof(spppstatusncp); +unsigned struct_srt_rt_sz = sizeof(srt_rt); +unsigned struct_stic_xinfo_sz = sizeof(stic_xinfo); +unsigned struct_sun_dkctlr_sz = sizeof(sun_dkctlr); +unsigned struct_sun_dkgeom_sz = sizeof(sun_dkgeom); +unsigned struct_sun_dkpart_sz = sizeof(sun_dkpart); +unsigned struct_synth_info_sz = sizeof(synth_info); +unsigned struct_tbrreq_sz = sizeof(tbrreq); +unsigned struct_tchars_sz = sizeof(tchars); +unsigned struct_termios_sz = sizeof(termios); 
+unsigned struct_timeval_sz = sizeof(timeval); +unsigned struct_twe_drivecommand_sz = sizeof(twe_drivecommand); +unsigned struct_twe_paramcommand_sz = sizeof(twe_paramcommand); +unsigned struct_twe_usercommand_sz = sizeof(twe_usercommand); +unsigned struct_ukyopon_identify_sz = sizeof(ukyopon_identify); +unsigned struct_urio_command_sz = sizeof(urio_command); +unsigned struct_usb_alt_interface_sz = sizeof(usb_alt_interface); +unsigned struct_usb_bulk_ra_wb_opt_sz = sizeof(usb_bulk_ra_wb_opt); +unsigned struct_usb_config_desc_sz = sizeof(usb_config_desc); +unsigned struct_usb_ctl_report_desc_sz = sizeof(usb_ctl_report_desc); +unsigned struct_usb_ctl_report_sz = sizeof(usb_ctl_report); +unsigned struct_usb_ctl_request_sz = sizeof(usb_ctl_request); +unsigned struct_usb_device_info_old_sz = sizeof(usb_device_info_old); +unsigned struct_usb_device_info_sz = sizeof(usb_device_info); +unsigned struct_usb_device_stats_sz = sizeof(usb_device_stats); +unsigned struct_usb_endpoint_desc_sz = sizeof(usb_endpoint_desc); +unsigned struct_usb_full_desc_sz = sizeof(usb_full_desc); +unsigned struct_usb_interface_desc_sz = sizeof(usb_interface_desc); +unsigned struct_usb_string_desc_sz = sizeof(usb_string_desc); +unsigned struct_utoppy_readfile_sz = sizeof(utoppy_readfile); +unsigned struct_utoppy_rename_sz = sizeof(utoppy_rename); +unsigned struct_utoppy_stats_sz = sizeof(utoppy_stats); +unsigned struct_utoppy_writefile_sz = sizeof(utoppy_writefile); +unsigned struct_v4l2_audio_sz = sizeof(v4l2_audio); +unsigned struct_v4l2_audioout_sz = sizeof(v4l2_audioout); +unsigned struct_v4l2_buffer_sz = sizeof(v4l2_buffer); +unsigned struct_v4l2_capability_sz = sizeof(v4l2_capability); +unsigned struct_v4l2_control_sz = sizeof(v4l2_control); +unsigned struct_v4l2_crop_sz = sizeof(v4l2_crop); +unsigned struct_v4l2_cropcap_sz = sizeof(v4l2_cropcap); +unsigned struct_v4l2_fmtdesc_sz = sizeof(v4l2_fmtdesc); +unsigned struct_v4l2_format_sz = sizeof(v4l2_format); +unsigned 
struct_v4l2_framebuffer_sz = sizeof(v4l2_framebuffer); +unsigned struct_v4l2_frequency_sz = sizeof(v4l2_frequency); +unsigned struct_v4l2_frmivalenum_sz = sizeof(v4l2_frmivalenum); +unsigned struct_v4l2_frmsizeenum_sz = sizeof(v4l2_frmsizeenum); +unsigned struct_v4l2_input_sz = sizeof(v4l2_input); +unsigned struct_v4l2_jpegcompression_sz = sizeof(v4l2_jpegcompression); +unsigned struct_v4l2_modulator_sz = sizeof(v4l2_modulator); +unsigned struct_v4l2_output_sz = sizeof(v4l2_output); +unsigned struct_v4l2_queryctrl_sz = sizeof(v4l2_queryctrl); +unsigned struct_v4l2_querymenu_sz = sizeof(v4l2_querymenu); +unsigned struct_v4l2_requestbuffers_sz = sizeof(v4l2_requestbuffers); +unsigned struct_v4l2_standard_sz = sizeof(v4l2_standard); +unsigned struct_v4l2_streamparm_sz = sizeof(v4l2_streamparm); +unsigned struct_v4l2_tuner_sz = sizeof(v4l2_tuner); +unsigned struct_vnd_ioctl_sz = sizeof(vnd_ioctl); +unsigned struct_vnd_user_sz = sizeof(vnd_user); +unsigned struct_vt_stat_sz = sizeof(vt_stat); +unsigned struct_wdog_conf_sz = sizeof(wdog_conf); +unsigned struct_wdog_mode_sz = sizeof(wdog_mode); +unsigned struct_wfq_conf_sz = sizeof(wfq_conf); +unsigned struct_wfq_getqid_sz = sizeof(wfq_getqid); +unsigned struct_wfq_getstats_sz = sizeof(wfq_getstats); +unsigned struct_wfq_interface_sz = sizeof(wfq_interface); +unsigned struct_wfq_setweight_sz = sizeof(wfq_setweight); +unsigned struct_winsize_sz = sizeof(winsize); +unsigned struct_wscons_event_sz = sizeof(wscons_event); +unsigned struct_wsdisplay_addscreendata_sz = sizeof(wsdisplay_addscreendata); +unsigned struct_wsdisplay_char_sz = sizeof(wsdisplay_char); +unsigned struct_wsdisplay_cmap_sz = sizeof(wsdisplay_cmap); +unsigned struct_wsdisplay_curpos_sz = sizeof(wsdisplay_curpos); +unsigned struct_wsdisplay_cursor_sz = sizeof(wsdisplay_cursor); +unsigned struct_wsdisplay_delscreendata_sz = sizeof(wsdisplay_delscreendata); +unsigned struct_wsdisplay_fbinfo_sz = sizeof(wsdisplay_fbinfo); +unsigned struct_wsdisplay_font_sz = 
sizeof(wsdisplay_font); +unsigned struct_wsdisplay_kbddata_sz = sizeof(wsdisplay_kbddata); +unsigned struct_wsdisplay_msgattrs_sz = sizeof(wsdisplay_msgattrs); +unsigned struct_wsdisplay_param_sz = sizeof(wsdisplay_param); +unsigned struct_wsdisplay_scroll_data_sz = sizeof(wsdisplay_scroll_data); +unsigned struct_wsdisplay_usefontdata_sz = sizeof(wsdisplay_usefontdata); +unsigned struct_wsdisplayio_blit_sz = sizeof(wsdisplayio_blit); +unsigned struct_wsdisplayio_bus_id_sz = sizeof(wsdisplayio_bus_id); +unsigned struct_wsdisplayio_edid_info_sz = sizeof(wsdisplayio_edid_info); +unsigned struct_wsdisplayio_fbinfo_sz = sizeof(wsdisplayio_fbinfo); +unsigned struct_wskbd_bell_data_sz = sizeof(wskbd_bell_data); +unsigned struct_wskbd_keyrepeat_data_sz = sizeof(wskbd_keyrepeat_data); +unsigned struct_wskbd_map_data_sz = sizeof(wskbd_map_data); +unsigned struct_wskbd_scroll_data_sz = sizeof(wskbd_scroll_data); +unsigned struct_wsmouse_calibcoords_sz = sizeof(wsmouse_calibcoords); +unsigned struct_wsmouse_id_sz = sizeof(wsmouse_id); +unsigned struct_wsmouse_repeat_sz = sizeof(wsmouse_repeat); +unsigned struct_wsmux_device_list_sz = sizeof(wsmux_device_list); +unsigned struct_wsmux_device_sz = sizeof(wsmux_device); +unsigned struct_xd_iocmd_sz = sizeof(xd_iocmd); + +unsigned struct_scsireq_sz = sizeof(struct scsireq); +unsigned struct_tone_sz = sizeof(tone_t); +unsigned union_twe_statrequest_sz = sizeof(union twe_statrequest); +unsigned struct_usb_device_descriptor_sz = sizeof(usb_device_descriptor_t); +unsigned struct_vt_mode_sz = sizeof(struct vt_mode); +unsigned struct__old_mixer_info_sz = sizeof(struct _old_mixer_info); +unsigned struct__agp_allocate_sz = sizeof(struct _agp_allocate); +unsigned struct__agp_bind_sz = sizeof(struct _agp_bind); +unsigned struct__agp_info_sz = sizeof(struct _agp_info); +unsigned struct__agp_setup_sz = sizeof(struct _agp_setup); +unsigned struct__agp_unbind_sz = sizeof(struct _agp_unbind); +unsigned struct_atareq_sz = sizeof(struct atareq); 
+unsigned struct_cpustate_sz = sizeof(struct cpustate); +unsigned struct_dmx_caps_sz = sizeof(struct dmx_caps); +unsigned enum_dmx_source_sz = sizeof(dmx_source_t); +unsigned union_dvd_authinfo_sz = sizeof(dvd_authinfo); +unsigned union_dvd_struct_sz = sizeof(dvd_struct); +unsigned enum_v4l2_priority_sz = sizeof(enum v4l2_priority); +unsigned struct_envsys_basic_info_sz = sizeof(struct envsys_basic_info); +unsigned struct_envsys_tre_data_sz = sizeof(struct envsys_tre_data); +unsigned enum_fe_sec_mini_cmd_sz = sizeof(enum fe_sec_mini_cmd); +unsigned enum_fe_sec_tone_mode_sz = sizeof(enum fe_sec_tone_mode); +unsigned enum_fe_sec_voltage_sz = sizeof(enum fe_sec_voltage); +unsigned enum_fe_status_sz = sizeof(enum fe_status); +unsigned struct_gdt_ctrt_sz = sizeof(struct gdt_ctrt); +unsigned struct_gdt_event_sz = sizeof(struct gdt_event); +unsigned struct_gdt_osv_sz = sizeof(struct gdt_osv); +unsigned struct_gdt_rescan_sz = sizeof(struct gdt_rescan); +unsigned struct_gdt_statist_sz = sizeof(struct gdt_statist); +unsigned struct_gdt_ucmd_sz = sizeof(struct gdt_ucmd); +unsigned struct_iscsi_conn_status_parameters_sz = + sizeof(iscsi_conn_status_parameters_t); +unsigned struct_iscsi_get_version_parameters_sz = + sizeof(iscsi_get_version_parameters_t); +unsigned struct_iscsi_iocommand_parameters_sz = + sizeof(iscsi_iocommand_parameters_t); +unsigned struct_iscsi_login_parameters_sz = sizeof(iscsi_login_parameters_t); +unsigned struct_iscsi_logout_parameters_sz = sizeof(iscsi_logout_parameters_t); +unsigned struct_iscsi_register_event_parameters_sz = + sizeof(iscsi_register_event_parameters_t); +unsigned struct_iscsi_remove_parameters_sz = sizeof(iscsi_remove_parameters_t); +unsigned struct_iscsi_send_targets_parameters_sz = + sizeof(iscsi_send_targets_parameters_t); +unsigned struct_iscsi_set_node_name_parameters_sz = + sizeof(iscsi_set_node_name_parameters_t); +unsigned struct_iscsi_wait_event_parameters_sz = + sizeof(iscsi_wait_event_parameters_t); +unsigned 
struct_isp_stats_sz = sizeof(isp_stats_t); +unsigned struct_lsenable_sz = sizeof(struct lsenable); +unsigned struct_lsdisable_sz = sizeof(struct lsdisable); +unsigned struct_mixer_ctrl_sz = sizeof(struct mixer_ctrl); +unsigned struct_mixer_devinfo_sz = sizeof(struct mixer_devinfo); +unsigned struct_mpu_command_rec_sz = sizeof(mpu_command_rec); +unsigned struct_rndstat_sz = sizeof(rndstat_t); +unsigned struct_rndstat_name_sz = sizeof(rndstat_name_t); +unsigned struct_rndctl_sz = sizeof(rndctl_t); +unsigned struct_rnddata_sz = sizeof(rnddata_t); +unsigned struct_rndpoolstat_sz = sizeof(rndpoolstat_t); +unsigned struct_rndstat_est_sz = sizeof(rndstat_est_t); +unsigned struct_rndstat_est_name_sz = sizeof(rndstat_est_name_t); +unsigned struct_pps_params_sz = sizeof(pps_params_t); +unsigned struct_pps_info_sz = sizeof(pps_info_t); +unsigned struct_mixer_info_sz = sizeof(struct mixer_info); +unsigned struct_RF_SparetWait_sz = sizeof(RF_SparetWait_t); +unsigned struct_RF_ComponentLabel_sz = sizeof(RF_ComponentLabel_t); +unsigned struct_RF_SingleComponent_sz = sizeof(RF_SingleComponent_t); +unsigned struct_RF_ProgressInfo_sz = sizeof(RF_ProgressInfo_t); const unsigned IOCTL_NOT_PRESENT = 0; -unsigned IOCTL_FIOASYNC = FIOASYNC; +unsigned IOCTL_AFM_ADDFMAP = AFM_ADDFMAP; +unsigned IOCTL_AFM_DELFMAP = AFM_DELFMAP; +unsigned IOCTL_AFM_CLEANFMAP = AFM_CLEANFMAP; +unsigned IOCTL_AFM_GETFMAP = AFM_GETFMAP; +unsigned IOCTL_ALTQGTYPE = ALTQGTYPE; +unsigned IOCTL_ALTQTBRSET = ALTQTBRSET; +unsigned IOCTL_ALTQTBRGET = ALTQTBRGET; +unsigned IOCTL_BLUE_IF_ATTACH = BLUE_IF_ATTACH; +unsigned IOCTL_BLUE_IF_DETACH = BLUE_IF_DETACH; +unsigned IOCTL_BLUE_ENABLE = BLUE_ENABLE; +unsigned IOCTL_BLUE_DISABLE = BLUE_DISABLE; +unsigned IOCTL_BLUE_CONFIG = BLUE_CONFIG; +unsigned IOCTL_BLUE_GETSTATS = BLUE_GETSTATS; +unsigned IOCTL_CBQ_IF_ATTACH = CBQ_IF_ATTACH; +unsigned IOCTL_CBQ_IF_DETACH = CBQ_IF_DETACH; +unsigned IOCTL_CBQ_ENABLE = CBQ_ENABLE; +unsigned IOCTL_CBQ_DISABLE = CBQ_DISABLE; +unsigned 
IOCTL_CBQ_CLEAR_HIERARCHY = CBQ_CLEAR_HIERARCHY; +unsigned IOCTL_CBQ_ADD_CLASS = CBQ_ADD_CLASS; +unsigned IOCTL_CBQ_DEL_CLASS = CBQ_DEL_CLASS; +unsigned IOCTL_CBQ_MODIFY_CLASS = CBQ_MODIFY_CLASS; +unsigned IOCTL_CBQ_ADD_FILTER = CBQ_ADD_FILTER; +unsigned IOCTL_CBQ_DEL_FILTER = CBQ_DEL_FILTER; +unsigned IOCTL_CBQ_GETSTATS = CBQ_GETSTATS; +unsigned IOCTL_CDNR_IF_ATTACH = CDNR_IF_ATTACH; +unsigned IOCTL_CDNR_IF_DETACH = CDNR_IF_DETACH; +unsigned IOCTL_CDNR_ENABLE = CDNR_ENABLE; +unsigned IOCTL_CDNR_DISABLE = CDNR_DISABLE; +unsigned IOCTL_CDNR_ADD_FILTER = CDNR_ADD_FILTER; +unsigned IOCTL_CDNR_DEL_FILTER = CDNR_DEL_FILTER; +unsigned IOCTL_CDNR_GETSTATS = CDNR_GETSTATS; +unsigned IOCTL_CDNR_ADD_ELEM = CDNR_ADD_ELEM; +unsigned IOCTL_CDNR_DEL_ELEM = CDNR_DEL_ELEM; +unsigned IOCTL_CDNR_ADD_TBM = CDNR_ADD_TBM; +unsigned IOCTL_CDNR_MOD_TBM = CDNR_MOD_TBM; +unsigned IOCTL_CDNR_TBM_STATS = CDNR_TBM_STATS; +unsigned IOCTL_CDNR_ADD_TCM = CDNR_ADD_TCM; +unsigned IOCTL_CDNR_MOD_TCM = CDNR_MOD_TCM; +unsigned IOCTL_CDNR_TCM_STATS = CDNR_TCM_STATS; +unsigned IOCTL_CDNR_ADD_TSW = CDNR_ADD_TSW; +unsigned IOCTL_CDNR_MOD_TSW = CDNR_MOD_TSW; +unsigned IOCTL_FIFOQ_IF_ATTACH = FIFOQ_IF_ATTACH; +unsigned IOCTL_FIFOQ_IF_DETACH = FIFOQ_IF_DETACH; +unsigned IOCTL_FIFOQ_ENABLE = FIFOQ_ENABLE; +unsigned IOCTL_FIFOQ_DISABLE = FIFOQ_DISABLE; +unsigned IOCTL_FIFOQ_CONFIG = FIFOQ_CONFIG; +unsigned IOCTL_FIFOQ_GETSTATS = FIFOQ_GETSTATS; +unsigned IOCTL_HFSC_IF_ATTACH = HFSC_IF_ATTACH; +unsigned IOCTL_HFSC_IF_DETACH = HFSC_IF_DETACH; +unsigned IOCTL_HFSC_ENABLE = HFSC_ENABLE; +unsigned IOCTL_HFSC_DISABLE = HFSC_DISABLE; +unsigned IOCTL_HFSC_CLEAR_HIERARCHY = HFSC_CLEAR_HIERARCHY; +unsigned IOCTL_HFSC_ADD_CLASS = HFSC_ADD_CLASS; +unsigned IOCTL_HFSC_DEL_CLASS = HFSC_DEL_CLASS; +unsigned IOCTL_HFSC_MOD_CLASS = HFSC_MOD_CLASS; +unsigned IOCTL_HFSC_ADD_FILTER = HFSC_ADD_FILTER; +unsigned IOCTL_HFSC_DEL_FILTER = HFSC_DEL_FILTER; +unsigned IOCTL_HFSC_GETSTATS = HFSC_GETSTATS; +unsigned IOCTL_JOBS_IF_ATTACH = 
JOBS_IF_ATTACH; +unsigned IOCTL_JOBS_IF_DETACH = JOBS_IF_DETACH; +unsigned IOCTL_JOBS_ENABLE = JOBS_ENABLE; +unsigned IOCTL_JOBS_DISABLE = JOBS_DISABLE; +unsigned IOCTL_JOBS_CLEAR = JOBS_CLEAR; +unsigned IOCTL_JOBS_ADD_CLASS = JOBS_ADD_CLASS; +unsigned IOCTL_JOBS_DEL_CLASS = JOBS_DEL_CLASS; +unsigned IOCTL_JOBS_MOD_CLASS = JOBS_MOD_CLASS; +unsigned IOCTL_JOBS_ADD_FILTER = JOBS_ADD_FILTER; +unsigned IOCTL_JOBS_DEL_FILTER = JOBS_DEL_FILTER; +unsigned IOCTL_JOBS_GETSTATS = JOBS_GETSTATS; +unsigned IOCTL_PRIQ_IF_ATTACH = PRIQ_IF_ATTACH; +unsigned IOCTL_PRIQ_IF_DETACH = PRIQ_IF_DETACH; +unsigned IOCTL_PRIQ_ENABLE = PRIQ_ENABLE; +unsigned IOCTL_PRIQ_DISABLE = PRIQ_DISABLE; +unsigned IOCTL_PRIQ_CLEAR = PRIQ_CLEAR; +unsigned IOCTL_PRIQ_ADD_CLASS = PRIQ_ADD_CLASS; +unsigned IOCTL_PRIQ_DEL_CLASS = PRIQ_DEL_CLASS; +unsigned IOCTL_PRIQ_MOD_CLASS = PRIQ_MOD_CLASS; +unsigned IOCTL_PRIQ_ADD_FILTER = PRIQ_ADD_FILTER; +unsigned IOCTL_PRIQ_DEL_FILTER = PRIQ_DEL_FILTER; +unsigned IOCTL_PRIQ_GETSTATS = PRIQ_GETSTATS; +unsigned IOCTL_RED_IF_ATTACH = RED_IF_ATTACH; +unsigned IOCTL_RED_IF_DETACH = RED_IF_DETACH; +unsigned IOCTL_RED_ENABLE = RED_ENABLE; +unsigned IOCTL_RED_DISABLE = RED_DISABLE; +unsigned IOCTL_RED_CONFIG = RED_CONFIG; +unsigned IOCTL_RED_GETSTATS = RED_GETSTATS; +unsigned IOCTL_RED_SETDEFAULTS = RED_SETDEFAULTS; +unsigned IOCTL_RIO_IF_ATTACH = RIO_IF_ATTACH; +unsigned IOCTL_RIO_IF_DETACH = RIO_IF_DETACH; +unsigned IOCTL_RIO_ENABLE = RIO_ENABLE; +unsigned IOCTL_RIO_DISABLE = RIO_DISABLE; +unsigned IOCTL_RIO_CONFIG = RIO_CONFIG; +unsigned IOCTL_RIO_GETSTATS = RIO_GETSTATS; +unsigned IOCTL_RIO_SETDEFAULTS = RIO_SETDEFAULTS; +unsigned IOCTL_WFQ_IF_ATTACH = WFQ_IF_ATTACH; +unsigned IOCTL_WFQ_IF_DETACH = WFQ_IF_DETACH; +unsigned IOCTL_WFQ_ENABLE = WFQ_ENABLE; +unsigned IOCTL_WFQ_DISABLE = WFQ_DISABLE; +unsigned IOCTL_WFQ_CONFIG = WFQ_CONFIG; +unsigned IOCTL_WFQ_GET_STATS = WFQ_GET_STATS; +unsigned IOCTL_WFQ_GET_QID = WFQ_GET_QID; +unsigned IOCTL_WFQ_SET_WEIGHT = 
WFQ_SET_WEIGHT; +unsigned IOCTL_CRIOGET = CRIOGET; +unsigned IOCTL_CIOCFSESSION = CIOCFSESSION; +unsigned IOCTL_CIOCKEY = CIOCKEY; +unsigned IOCTL_CIOCNFKEYM = CIOCNFKEYM; +unsigned IOCTL_CIOCNFSESSION = CIOCNFSESSION; +unsigned IOCTL_CIOCNCRYPTRETM = CIOCNCRYPTRETM; +unsigned IOCTL_CIOCNCRYPTRET = CIOCNCRYPTRET; +unsigned IOCTL_CIOCGSESSION = CIOCGSESSION; +unsigned IOCTL_CIOCNGSESSION = CIOCNGSESSION; +unsigned IOCTL_CIOCCRYPT = CIOCCRYPT; +unsigned IOCTL_CIOCNCRYPTM = CIOCNCRYPTM; +unsigned IOCTL_CIOCASYMFEAT = CIOCASYMFEAT; +unsigned IOCTL_APM_IOC_REJECT = APM_IOC_REJECT; +unsigned IOCTL_APM_IOC_STANDBY = APM_IOC_STANDBY; +unsigned IOCTL_APM_IOC_SUSPEND = APM_IOC_SUSPEND; +unsigned IOCTL_OAPM_IOC_GETPOWER = OAPM_IOC_GETPOWER; +unsigned IOCTL_APM_IOC_GETPOWER = APM_IOC_GETPOWER; +unsigned IOCTL_APM_IOC_NEXTEVENT = APM_IOC_NEXTEVENT; +unsigned IOCTL_APM_IOC_DEV_CTL = APM_IOC_DEV_CTL; +unsigned IOCTL_NETBSD_DM_IOCTL = NETBSD_DM_IOCTL; +unsigned IOCTL_DMIO_SETFUNC = DMIO_SETFUNC; +unsigned IOCTL_DMX_START = DMX_START; +unsigned IOCTL_DMX_STOP = DMX_STOP; +unsigned IOCTL_DMX_SET_FILTER = DMX_SET_FILTER; +unsigned IOCTL_DMX_SET_PES_FILTER = DMX_SET_PES_FILTER; +unsigned IOCTL_DMX_SET_BUFFER_SIZE = DMX_SET_BUFFER_SIZE; +unsigned IOCTL_DMX_GET_STC = DMX_GET_STC; +unsigned IOCTL_DMX_ADD_PID = DMX_ADD_PID; +unsigned IOCTL_DMX_REMOVE_PID = DMX_REMOVE_PID; +unsigned IOCTL_DMX_GET_CAPS = DMX_GET_CAPS; +unsigned IOCTL_DMX_SET_SOURCE = DMX_SET_SOURCE; +unsigned IOCTL_FE_READ_STATUS = FE_READ_STATUS; +unsigned IOCTL_FE_READ_BER = FE_READ_BER; +unsigned IOCTL_FE_READ_SNR = FE_READ_SNR; +unsigned IOCTL_FE_READ_SIGNAL_STRENGTH = FE_READ_SIGNAL_STRENGTH; +unsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS = FE_READ_UNCORRECTED_BLOCKS; +unsigned IOCTL_FE_SET_FRONTEND = FE_SET_FRONTEND; +unsigned IOCTL_FE_GET_FRONTEND = FE_GET_FRONTEND; +unsigned IOCTL_FE_GET_EVENT = FE_GET_EVENT; +unsigned IOCTL_FE_GET_INFO = FE_GET_INFO; +unsigned IOCTL_FE_DISEQC_RESET_OVERLOAD = FE_DISEQC_RESET_OVERLOAD; 
+unsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD = FE_DISEQC_SEND_MASTER_CMD; +unsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY = FE_DISEQC_RECV_SLAVE_REPLY; +unsigned IOCTL_FE_DISEQC_SEND_BURST = FE_DISEQC_SEND_BURST; +unsigned IOCTL_FE_SET_TONE = FE_SET_TONE; +unsigned IOCTL_FE_SET_VOLTAGE = FE_SET_VOLTAGE; +unsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE = FE_ENABLE_HIGH_LNB_VOLTAGE; +unsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE = FE_SET_FRONTEND_TUNE_MODE; +unsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD = FE_DISHNETWORK_SEND_LEGACY_CMD; +unsigned IOCTL_FILEMON_SET_FD = FILEMON_SET_FD; +unsigned IOCTL_FILEMON_SET_PID = FILEMON_SET_PID; +unsigned IOCTL_HDAUDIO_FGRP_INFO = HDAUDIO_FGRP_INFO; +unsigned IOCTL_HDAUDIO_FGRP_GETCONFIG = HDAUDIO_FGRP_GETCONFIG; +unsigned IOCTL_HDAUDIO_FGRP_SETCONFIG = HDAUDIO_FGRP_SETCONFIG; +unsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO = HDAUDIO_FGRP_WIDGET_INFO; +unsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO = HDAUDIO_FGRP_CODEC_INFO; +unsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO = HDAUDIO_AFG_WIDGET_INFO; +unsigned IOCTL_HDAUDIO_AFG_CODEC_INFO = HDAUDIO_AFG_CODEC_INFO; +unsigned IOCTL_CEC_GET_PHYS_ADDR = CEC_GET_PHYS_ADDR; +unsigned IOCTL_CEC_GET_LOG_ADDRS = CEC_GET_LOG_ADDRS; +unsigned IOCTL_CEC_SET_LOG_ADDRS = CEC_SET_LOG_ADDRS; +unsigned IOCTL_CEC_GET_VENDOR_ID = CEC_GET_VENDOR_ID; +unsigned IOCTL_HPCFBIO_GCONF = HPCFBIO_GCONF; +unsigned IOCTL_HPCFBIO_SCONF = HPCFBIO_SCONF; +unsigned IOCTL_HPCFBIO_GDSPCONF = HPCFBIO_GDSPCONF; +unsigned IOCTL_HPCFBIO_SDSPCONF = HPCFBIO_SDSPCONF; +unsigned IOCTL_HPCFBIO_GOP = HPCFBIO_GOP; +unsigned IOCTL_HPCFBIO_SOP = HPCFBIO_SOP; +unsigned IOCTL_IOPIOCPT = IOPIOCPT; +unsigned IOCTL_IOPIOCGLCT = IOPIOCGLCT; +unsigned IOCTL_IOPIOCGSTATUS = IOPIOCGSTATUS; +unsigned IOCTL_IOPIOCRECONFIG = IOPIOCRECONFIG; +unsigned IOCTL_IOPIOCGTIDMAP = IOPIOCGTIDMAP; +unsigned IOCTL_SIOCGATHSTATS = SIOCGATHSTATS; +unsigned IOCTL_SIOCGATHDIAG = SIOCGATHDIAG; +unsigned IOCTL_METEORCAPTUR = METEORCAPTUR; +unsigned IOCTL_METEORCAPFRM = METEORCAPFRM; +unsigned 
IOCTL_METEORSETGEO = METEORSETGEO; +unsigned IOCTL_METEORGETGEO = METEORGETGEO; +unsigned IOCTL_METEORSTATUS = METEORSTATUS; +unsigned IOCTL_METEORSHUE = METEORSHUE; +unsigned IOCTL_METEORGHUE = METEORGHUE; +unsigned IOCTL_METEORSFMT = METEORSFMT; +unsigned IOCTL_METEORGFMT = METEORGFMT; +unsigned IOCTL_METEORSINPUT = METEORSINPUT; +unsigned IOCTL_METEORGINPUT = METEORGINPUT; +unsigned IOCTL_METEORSCHCV = METEORSCHCV; +unsigned IOCTL_METEORGCHCV = METEORGCHCV; +unsigned IOCTL_METEORSCOUNT = METEORSCOUNT; +unsigned IOCTL_METEORGCOUNT = METEORGCOUNT; +unsigned IOCTL_METEORSFPS = METEORSFPS; +unsigned IOCTL_METEORGFPS = METEORGFPS; +unsigned IOCTL_METEORSSIGNAL = METEORSSIGNAL; +unsigned IOCTL_METEORGSIGNAL = METEORGSIGNAL; +unsigned IOCTL_METEORSVIDEO = METEORSVIDEO; +unsigned IOCTL_METEORGVIDEO = METEORGVIDEO; +unsigned IOCTL_METEORSBRIG = METEORSBRIG; +unsigned IOCTL_METEORGBRIG = METEORGBRIG; +unsigned IOCTL_METEORSCSAT = METEORSCSAT; +unsigned IOCTL_METEORGCSAT = METEORGCSAT; +unsigned IOCTL_METEORSCONT = METEORSCONT; +unsigned IOCTL_METEORGCONT = METEORGCONT; +unsigned IOCTL_METEORSHWS = METEORSHWS; +unsigned IOCTL_METEORGHWS = METEORGHWS; +unsigned IOCTL_METEORSVWS = METEORSVWS; +unsigned IOCTL_METEORGVWS = METEORGVWS; +unsigned IOCTL_METEORSTS = METEORSTS; +unsigned IOCTL_METEORGTS = METEORGTS; +unsigned IOCTL_TVTUNER_SETCHNL = TVTUNER_SETCHNL; +unsigned IOCTL_TVTUNER_GETCHNL = TVTUNER_GETCHNL; +unsigned IOCTL_TVTUNER_SETTYPE = TVTUNER_SETTYPE; +unsigned IOCTL_TVTUNER_GETTYPE = TVTUNER_GETTYPE; +unsigned IOCTL_TVTUNER_GETSTATUS = TVTUNER_GETSTATUS; +unsigned IOCTL_TVTUNER_SETFREQ = TVTUNER_SETFREQ; +unsigned IOCTL_TVTUNER_GETFREQ = TVTUNER_GETFREQ; +unsigned IOCTL_TVTUNER_SETAFC = TVTUNER_SETAFC; +unsigned IOCTL_TVTUNER_GETAFC = TVTUNER_GETAFC; +unsigned IOCTL_RADIO_SETMODE = RADIO_SETMODE; +unsigned IOCTL_RADIO_GETMODE = RADIO_GETMODE; +unsigned IOCTL_RADIO_SETFREQ = RADIO_SETFREQ; +unsigned IOCTL_RADIO_GETFREQ = RADIO_GETFREQ; +unsigned 
IOCTL_METEORSACTPIXFMT = METEORSACTPIXFMT; +unsigned IOCTL_METEORGACTPIXFMT = METEORGACTPIXFMT; +unsigned IOCTL_METEORGSUPPIXFMT = METEORGSUPPIXFMT; +unsigned IOCTL_TVTUNER_GETCHNLSET = TVTUNER_GETCHNLSET; +unsigned IOCTL_REMOTE_GETKEY = REMOTE_GETKEY; +unsigned IOCTL_GDT_IOCTL_GENERAL = GDT_IOCTL_GENERAL; +unsigned IOCTL_GDT_IOCTL_DRVERS = GDT_IOCTL_DRVERS; +unsigned IOCTL_GDT_IOCTL_CTRTYPE = GDT_IOCTL_CTRTYPE; +unsigned IOCTL_GDT_IOCTL_OSVERS = GDT_IOCTL_OSVERS; +unsigned IOCTL_GDT_IOCTL_CTRCNT = GDT_IOCTL_CTRCNT; +unsigned IOCTL_GDT_IOCTL_EVENT = GDT_IOCTL_EVENT; +unsigned IOCTL_GDT_IOCTL_STATIST = GDT_IOCTL_STATIST; +unsigned IOCTL_GDT_IOCTL_RESCAN = GDT_IOCTL_RESCAN; +unsigned IOCTL_ISP_SDBLEV = ISP_SDBLEV; +unsigned IOCTL_ISP_RESETHBA = ISP_RESETHBA; +unsigned IOCTL_ISP_RESCAN = ISP_RESCAN; +unsigned IOCTL_ISP_SETROLE = ISP_SETROLE; +unsigned IOCTL_ISP_GETROLE = ISP_GETROLE; +unsigned IOCTL_ISP_GET_STATS = ISP_GET_STATS; +unsigned IOCTL_ISP_CLR_STATS = ISP_CLR_STATS; +unsigned IOCTL_ISP_FC_LIP = ISP_FC_LIP; +unsigned IOCTL_ISP_FC_GETDINFO = ISP_FC_GETDINFO; +unsigned IOCTL_ISP_GET_FW_CRASH_DUMP = ISP_GET_FW_CRASH_DUMP; +unsigned IOCTL_ISP_FORCE_CRASH_DUMP = ISP_FORCE_CRASH_DUMP; +unsigned IOCTL_ISP_FC_GETHINFO = ISP_FC_GETHINFO; +unsigned IOCTL_ISP_TSK_MGMT = ISP_TSK_MGMT; +unsigned IOCTL_ISP_FC_GETDLIST = ISP_FC_GETDLIST; +unsigned IOCTL_MLXD_STATUS = MLXD_STATUS; +unsigned IOCTL_MLXD_CHECKASYNC = MLXD_CHECKASYNC; +unsigned IOCTL_MLXD_DETACH = MLXD_DETACH; +unsigned IOCTL_MLX_RESCAN_DRIVES = MLX_RESCAN_DRIVES; +unsigned IOCTL_MLX_PAUSE_CHANNEL = MLX_PAUSE_CHANNEL; +unsigned IOCTL_MLX_COMMAND = MLX_COMMAND; +unsigned IOCTL_MLX_REBUILDASYNC = MLX_REBUILDASYNC; +unsigned IOCTL_MLX_REBUILDSTAT = MLX_REBUILDSTAT; +unsigned IOCTL_MLX_GET_SYSDRIVE = MLX_GET_SYSDRIVE; +unsigned IOCTL_MLX_GET_CINFO = MLX_GET_CINFO; +unsigned IOCTL_NVME_PASSTHROUGH_CMD = NVME_PASSTHROUGH_CMD; +unsigned IOCTL_IRDA_RESET_PARAMS = IRDA_RESET_PARAMS; +unsigned IOCTL_IRDA_SET_PARAMS = 
IRDA_SET_PARAMS; +unsigned IOCTL_IRDA_GET_SPEEDMASK = IRDA_GET_SPEEDMASK; +unsigned IOCTL_IRDA_GET_TURNAROUNDMASK = IRDA_GET_TURNAROUNDMASK; +unsigned IOCTL_IRFRAMETTY_GET_DEVICE = IRFRAMETTY_GET_DEVICE; +unsigned IOCTL_IRFRAMETTY_GET_DONGLE = IRFRAMETTY_GET_DONGLE; +unsigned IOCTL_IRFRAMETTY_SET_DONGLE = IRFRAMETTY_SET_DONGLE; +unsigned IOCTL_SATIORESET = SATIORESET; +unsigned IOCTL_SATIOGID = SATIOGID; +unsigned IOCTL_SATIOSBUFSIZE = SATIOSBUFSIZE; +unsigned IOCTL_ISV_CMD = ISV_CMD; +unsigned IOCTL_WTQICMD = WTQICMD; +unsigned IOCTL_ISCSI_GET_VERSION = ISCSI_GET_VERSION; +unsigned IOCTL_ISCSI_LOGIN = ISCSI_LOGIN; +unsigned IOCTL_ISCSI_LOGOUT = ISCSI_LOGOUT; +unsigned IOCTL_ISCSI_ADD_CONNECTION = ISCSI_ADD_CONNECTION; +unsigned IOCTL_ISCSI_RESTORE_CONNECTION = ISCSI_RESTORE_CONNECTION; +unsigned IOCTL_ISCSI_REMOVE_CONNECTION = ISCSI_REMOVE_CONNECTION; +unsigned IOCTL_ISCSI_CONNECTION_STATUS = ISCSI_CONNECTION_STATUS; +unsigned IOCTL_ISCSI_SEND_TARGETS = ISCSI_SEND_TARGETS; +unsigned IOCTL_ISCSI_SET_NODE_NAME = ISCSI_SET_NODE_NAME; +unsigned IOCTL_ISCSI_IO_COMMAND = ISCSI_IO_COMMAND; +unsigned IOCTL_ISCSI_REGISTER_EVENT = ISCSI_REGISTER_EVENT; +unsigned IOCTL_ISCSI_DEREGISTER_EVENT = ISCSI_DEREGISTER_EVENT; +unsigned IOCTL_ISCSI_WAIT_EVENT = ISCSI_WAIT_EVENT; +unsigned IOCTL_ISCSI_POLL_EVENT = ISCSI_POLL_EVENT; +unsigned IOCTL_OFIOCGET = OFIOCGET; +unsigned IOCTL_OFIOCSET = OFIOCSET; +unsigned IOCTL_OFIOCNEXTPROP = OFIOCNEXTPROP; +unsigned IOCTL_OFIOCGETOPTNODE = OFIOCGETOPTNODE; +unsigned IOCTL_OFIOCGETNEXT = OFIOCGETNEXT; +unsigned IOCTL_OFIOCGETCHILD = OFIOCGETCHILD; +unsigned IOCTL_OFIOCFINDDEVICE = OFIOCFINDDEVICE; +unsigned IOCTL_AMR_IO_VERSION = AMR_IO_VERSION; +unsigned IOCTL_AMR_IO_COMMAND = AMR_IO_COMMAND; +unsigned IOCTL_MLYIO_COMMAND = MLYIO_COMMAND; +unsigned IOCTL_MLYIO_HEALTH = MLYIO_HEALTH; +unsigned IOCTL_PCI_IOC_CFGREAD = PCI_IOC_CFGREAD; +unsigned IOCTL_PCI_IOC_CFGWRITE = PCI_IOC_CFGWRITE; +unsigned IOCTL_PCI_IOC_BDF_CFGREAD = 
PCI_IOC_BDF_CFGREAD; +unsigned IOCTL_PCI_IOC_BDF_CFGWRITE = PCI_IOC_BDF_CFGWRITE; +unsigned IOCTL_PCI_IOC_BUSINFO = PCI_IOC_BUSINFO; +unsigned IOCTL_PCI_IOC_DRVNAME = PCI_IOC_DRVNAME; +unsigned IOCTL_PCI_IOC_DRVNAMEONBUS = PCI_IOC_DRVNAMEONBUS; +unsigned IOCTL_TWEIO_COMMAND = TWEIO_COMMAND; +unsigned IOCTL_TWEIO_STATS = TWEIO_STATS; +unsigned IOCTL_TWEIO_AEN_POLL = TWEIO_AEN_POLL; +unsigned IOCTL_TWEIO_AEN_WAIT = TWEIO_AEN_WAIT; +unsigned IOCTL_TWEIO_SET_PARAM = TWEIO_SET_PARAM; +unsigned IOCTL_TWEIO_GET_PARAM = TWEIO_GET_PARAM; +unsigned IOCTL_TWEIO_RESET = TWEIO_RESET; +unsigned IOCTL_TWEIO_ADD_UNIT = TWEIO_ADD_UNIT; +unsigned IOCTL_TWEIO_DEL_UNIT = TWEIO_DEL_UNIT; +unsigned IOCTL_SIOCSCNWDOMAIN = SIOCSCNWDOMAIN; +unsigned IOCTL_SIOCGCNWDOMAIN = SIOCGCNWDOMAIN; +unsigned IOCTL_SIOCSCNWKEY = SIOCSCNWKEY; +unsigned IOCTL_SIOCGCNWSTATUS = SIOCGCNWSTATUS; +unsigned IOCTL_SIOCGCNWSTATS = SIOCGCNWSTATS; +unsigned IOCTL_SIOCGCNWTRAIL = SIOCGCNWTRAIL; +unsigned IOCTL_SIOCGRAYSIGLEV = SIOCGRAYSIGLEV; +unsigned IOCTL_RAIDFRAME_SHUTDOWN = RAIDFRAME_SHUTDOWN; +unsigned IOCTL_RAIDFRAME_TUR = RAIDFRAME_TUR; +unsigned IOCTL_RAIDFRAME_FAIL_DISK = RAIDFRAME_FAIL_DISK; +unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS = RAIDFRAME_CHECK_RECON_STATUS; +unsigned IOCTL_RAIDFRAME_REWRITEPARITY = RAIDFRAME_REWRITEPARITY; +unsigned IOCTL_RAIDFRAME_COPYBACK = RAIDFRAME_COPYBACK; +unsigned IOCTL_RAIDFRAME_SPARET_WAIT = RAIDFRAME_SPARET_WAIT; +unsigned IOCTL_RAIDFRAME_SEND_SPARET = RAIDFRAME_SEND_SPARET; +unsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT = RAIDFRAME_ABORT_SPARET_WAIT; +unsigned IOCTL_RAIDFRAME_START_ATRACE = RAIDFRAME_START_ATRACE; +unsigned IOCTL_RAIDFRAME_STOP_ATRACE = RAIDFRAME_STOP_ATRACE; +unsigned IOCTL_RAIDFRAME_GET_SIZE = RAIDFRAME_GET_SIZE; +unsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS = RAIDFRAME_RESET_ACCTOTALS; +unsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS = RAIDFRAME_KEEP_ACCTOTALS; +unsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL = RAIDFRAME_GET_COMPONENT_LABEL; +unsigned 
IOCTL_RAIDFRAME_SET_COMPONENT_LABEL = RAIDFRAME_SET_COMPONENT_LABEL; +unsigned IOCTL_RAIDFRAME_INIT_LABELS = RAIDFRAME_INIT_LABELS; +unsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE = RAIDFRAME_ADD_HOT_SPARE; +unsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE = RAIDFRAME_REMOVE_HOT_SPARE; +unsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE = RAIDFRAME_REBUILD_IN_PLACE; +unsigned IOCTL_RAIDFRAME_CHECK_PARITY = RAIDFRAME_CHECK_PARITY; +unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS = + RAIDFRAME_CHECK_PARITYREWRITE_STATUS; +unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS = + RAIDFRAME_CHECK_COPYBACK_STATUS; +unsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG = RAIDFRAME_SET_AUTOCONFIG; +unsigned IOCTL_RAIDFRAME_SET_ROOT = RAIDFRAME_SET_ROOT; +unsigned IOCTL_RAIDFRAME_DELETE_COMPONENT = RAIDFRAME_DELETE_COMPONENT; +unsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE = + RAIDFRAME_INCORPORATE_HOT_SPARE; +unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT = + RAIDFRAME_CHECK_RECON_STATUS_EXT; +unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT = + RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT; +unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT = + RAIDFRAME_CHECK_COPYBACK_STATUS_EXT; +unsigned IOCTL_RAIDFRAME_CONFIGURE = RAIDFRAME_CONFIGURE; +unsigned IOCTL_RAIDFRAME_GET_INFO = RAIDFRAME_GET_INFO; +unsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS = RAIDFRAME_PARITYMAP_STATUS; +unsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE = + RAIDFRAME_PARITYMAP_GET_DISABLE; +unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE = + RAIDFRAME_PARITYMAP_SET_DISABLE; +unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS = RAIDFRAME_PARITYMAP_SET_PARAMS; +unsigned IOCTL_RAIDFRAME_SET_LAST_UNIT = RAIDFRAME_SET_LAST_UNIT; +unsigned IOCTL_MBPPIOCSPARAM = MBPPIOCSPARAM; +unsigned IOCTL_MBPPIOCGPARAM = MBPPIOCGPARAM; +unsigned IOCTL_MBPPIOCGSTAT = MBPPIOCGSTAT; +unsigned IOCTL_SESIOC_GETNOBJ = SESIOC_GETNOBJ; +unsigned IOCTL_SESIOC_GETOBJMAP = SESIOC_GETOBJMAP; +unsigned IOCTL_SESIOC_GETENCSTAT = SESIOC_GETENCSTAT; +unsigned IOCTL_SESIOC_SETENCSTAT = 
SESIOC_SETENCSTAT; +unsigned IOCTL_SESIOC_GETOBJSTAT = SESIOC_GETOBJSTAT; +unsigned IOCTL_SESIOC_SETOBJSTAT = SESIOC_SETOBJSTAT; +unsigned IOCTL_SESIOC_GETTEXT = SESIOC_GETTEXT; +unsigned IOCTL_SESIOC_INIT = SESIOC_INIT; +unsigned IOCTL_SUN_DKIOCGGEOM = SUN_DKIOCGGEOM; +unsigned IOCTL_SUN_DKIOCINFO = SUN_DKIOCINFO; +unsigned IOCTL_SUN_DKIOCGPART = SUN_DKIOCGPART; +unsigned IOCTL_FBIOGTYPE = FBIOGTYPE; +unsigned IOCTL_FBIOPUTCMAP = FBIOPUTCMAP; +unsigned IOCTL_FBIOGETCMAP = FBIOGETCMAP; +unsigned IOCTL_FBIOGATTR = FBIOGATTR; +unsigned IOCTL_FBIOSVIDEO = FBIOSVIDEO; +unsigned IOCTL_FBIOGVIDEO = FBIOGVIDEO; +unsigned IOCTL_FBIOSCURSOR = FBIOSCURSOR; +unsigned IOCTL_FBIOGCURSOR = FBIOGCURSOR; +unsigned IOCTL_FBIOSCURPOS = FBIOSCURPOS; +unsigned IOCTL_FBIOGCURPOS = FBIOGCURPOS; +unsigned IOCTL_FBIOGCURMAX = FBIOGCURMAX; +unsigned IOCTL_KIOCTRANS = KIOCTRANS; +unsigned IOCTL_KIOCSETKEY = KIOCSETKEY; +unsigned IOCTL_KIOCGETKEY = KIOCGETKEY; +unsigned IOCTL_KIOCGTRANS = KIOCGTRANS; +unsigned IOCTL_KIOCCMD = KIOCCMD; +unsigned IOCTL_KIOCTYPE = KIOCTYPE; +unsigned IOCTL_KIOCSDIRECT = KIOCSDIRECT; +unsigned IOCTL_KIOCSKEY = KIOCSKEY; +unsigned IOCTL_KIOCGKEY = KIOCGKEY; +unsigned IOCTL_KIOCSLED = KIOCSLED; +unsigned IOCTL_KIOCGLED = KIOCGLED; +unsigned IOCTL_KIOCLAYOUT = KIOCLAYOUT; +unsigned IOCTL_VUIDSFORMAT = VUIDSFORMAT; +unsigned IOCTL_VUIDGFORMAT = VUIDGFORMAT; +unsigned IOCTL_STICIO_GXINFO = STICIO_GXINFO; +unsigned IOCTL_STICIO_RESET = STICIO_RESET; +unsigned IOCTL_STICIO_STARTQ = STICIO_STARTQ; +unsigned IOCTL_STICIO_STOPQ = STICIO_STOPQ; +unsigned IOCTL_UKYOPON_IDENTIFY = UKYOPON_IDENTIFY; +unsigned IOCTL_URIO_SEND_COMMAND = URIO_SEND_COMMAND; +unsigned IOCTL_URIO_RECV_COMMAND = URIO_RECV_COMMAND; +unsigned IOCTL_USB_REQUEST = USB_REQUEST; +unsigned IOCTL_USB_SETDEBUG = USB_SETDEBUG; +unsigned IOCTL_USB_DISCOVER = USB_DISCOVER; +unsigned IOCTL_USB_DEVICEINFO = USB_DEVICEINFO; +unsigned IOCTL_USB_DEVICEINFO_OLD = USB_DEVICEINFO_OLD; +unsigned IOCTL_USB_DEVICESTATS = 
USB_DEVICESTATS; +unsigned IOCTL_USB_GET_REPORT_DESC = USB_GET_REPORT_DESC; +unsigned IOCTL_USB_SET_IMMED = USB_SET_IMMED; +unsigned IOCTL_USB_GET_REPORT = USB_GET_REPORT; +unsigned IOCTL_USB_SET_REPORT = USB_SET_REPORT; +unsigned IOCTL_USB_GET_REPORT_ID = USB_GET_REPORT_ID; +unsigned IOCTL_USB_GET_CONFIG = USB_GET_CONFIG; +unsigned IOCTL_USB_SET_CONFIG = USB_SET_CONFIG; +unsigned IOCTL_USB_GET_ALTINTERFACE = USB_GET_ALTINTERFACE; +unsigned IOCTL_USB_SET_ALTINTERFACE = USB_SET_ALTINTERFACE; +unsigned IOCTL_USB_GET_NO_ALT = USB_GET_NO_ALT; +unsigned IOCTL_USB_GET_DEVICE_DESC = USB_GET_DEVICE_DESC; +unsigned IOCTL_USB_GET_CONFIG_DESC = USB_GET_CONFIG_DESC; +unsigned IOCTL_USB_GET_INTERFACE_DESC = USB_GET_INTERFACE_DESC; +unsigned IOCTL_USB_GET_ENDPOINT_DESC = USB_GET_ENDPOINT_DESC; +unsigned IOCTL_USB_GET_FULL_DESC = USB_GET_FULL_DESC; +unsigned IOCTL_USB_GET_STRING_DESC = USB_GET_STRING_DESC; +unsigned IOCTL_USB_DO_REQUEST = USB_DO_REQUEST; +unsigned IOCTL_USB_GET_DEVICEINFO = USB_GET_DEVICEINFO; +unsigned IOCTL_USB_GET_DEVICEINFO_OLD = USB_GET_DEVICEINFO_OLD; +unsigned IOCTL_USB_SET_SHORT_XFER = USB_SET_SHORT_XFER; +unsigned IOCTL_USB_SET_TIMEOUT = USB_SET_TIMEOUT; +unsigned IOCTL_USB_SET_BULK_RA = USB_SET_BULK_RA; +unsigned IOCTL_USB_SET_BULK_WB = USB_SET_BULK_WB; +unsigned IOCTL_USB_SET_BULK_RA_OPT = USB_SET_BULK_RA_OPT; +unsigned IOCTL_USB_SET_BULK_WB_OPT = USB_SET_BULK_WB_OPT; +unsigned IOCTL_USB_GET_CM_OVER_DATA = USB_GET_CM_OVER_DATA; +unsigned IOCTL_USB_SET_CM_OVER_DATA = USB_SET_CM_OVER_DATA; +unsigned IOCTL_UTOPPYIOTURBO = UTOPPYIOTURBO; +unsigned IOCTL_UTOPPYIOCANCEL = UTOPPYIOCANCEL; +unsigned IOCTL_UTOPPYIOREBOOT = UTOPPYIOREBOOT; +unsigned IOCTL_UTOPPYIOSTATS = UTOPPYIOSTATS; +unsigned IOCTL_UTOPPYIORENAME = UTOPPYIORENAME; +unsigned IOCTL_UTOPPYIOMKDIR = UTOPPYIOMKDIR; +unsigned IOCTL_UTOPPYIODELETE = UTOPPYIODELETE; +unsigned IOCTL_UTOPPYIOREADDIR = UTOPPYIOREADDIR; +unsigned IOCTL_UTOPPYIOREADFILE = UTOPPYIOREADFILE; +unsigned 
IOCTL_UTOPPYIOWRITEFILE = UTOPPYIOWRITEFILE; +unsigned IOCTL_DIOSXDCMD = DIOSXDCMD; +unsigned IOCTL_VT_OPENQRY = VT_OPENQRY; +unsigned IOCTL_VT_SETMODE = VT_SETMODE; +unsigned IOCTL_VT_GETMODE = VT_GETMODE; +unsigned IOCTL_VT_RELDISP = VT_RELDISP; +unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE; +unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE; +unsigned IOCTL_VT_GETACTIVE = VT_GETACTIVE; +unsigned IOCTL_VT_GETSTATE = VT_GETSTATE; +unsigned IOCTL_KDGETKBENT = KDGETKBENT; +unsigned IOCTL_KDGKBMODE = KDGKBMODE; +unsigned IOCTL_KDSKBMODE = KDSKBMODE; +unsigned IOCTL_KDMKTONE = KDMKTONE; +unsigned IOCTL_KDSETMODE = KDSETMODE; +unsigned IOCTL_KDENABIO = KDENABIO; +unsigned IOCTL_KDDISABIO = KDDISABIO; +unsigned IOCTL_KDGKBTYPE = KDGKBTYPE; +unsigned IOCTL_KDGETLED = KDGETLED; +unsigned IOCTL_KDSETLED = KDSETLED; +unsigned IOCTL_KDSETRAD = KDSETRAD; +unsigned IOCTL_VGAPCVTID = VGAPCVTID; +unsigned IOCTL_CONS_GETVERS = CONS_GETVERS; +unsigned IOCTL_WSKBDIO_GTYPE = WSKBDIO_GTYPE; +unsigned IOCTL_WSKBDIO_BELL = WSKBDIO_BELL; +unsigned IOCTL_WSKBDIO_COMPLEXBELL = WSKBDIO_COMPLEXBELL; +unsigned IOCTL_WSKBDIO_SETBELL = WSKBDIO_SETBELL; +unsigned IOCTL_WSKBDIO_GETBELL = WSKBDIO_GETBELL; +unsigned IOCTL_WSKBDIO_SETDEFAULTBELL = WSKBDIO_SETDEFAULTBELL; +unsigned IOCTL_WSKBDIO_GETDEFAULTBELL = WSKBDIO_GETDEFAULTBELL; +unsigned IOCTL_WSKBDIO_SETKEYREPEAT = WSKBDIO_SETKEYREPEAT; +unsigned IOCTL_WSKBDIO_GETKEYREPEAT = WSKBDIO_GETKEYREPEAT; +unsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT = WSKBDIO_SETDEFAULTKEYREPEAT; +unsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT = WSKBDIO_GETDEFAULTKEYREPEAT; +unsigned IOCTL_WSKBDIO_SETLEDS = WSKBDIO_SETLEDS; +unsigned IOCTL_WSKBDIO_GETLEDS = WSKBDIO_GETLEDS; +unsigned IOCTL_WSKBDIO_GETMAP = WSKBDIO_GETMAP; +unsigned IOCTL_WSKBDIO_SETMAP = WSKBDIO_SETMAP; +unsigned IOCTL_WSKBDIO_GETENCODING = WSKBDIO_GETENCODING; +unsigned IOCTL_WSKBDIO_SETENCODING = WSKBDIO_SETENCODING; +unsigned IOCTL_WSKBDIO_SETMODE = WSKBDIO_SETMODE; +unsigned IOCTL_WSKBDIO_GETMODE = 
WSKBDIO_GETMODE; +unsigned IOCTL_WSKBDIO_SETKEYCLICK = WSKBDIO_SETKEYCLICK; +unsigned IOCTL_WSKBDIO_GETKEYCLICK = WSKBDIO_GETKEYCLICK; +unsigned IOCTL_WSKBDIO_GETSCROLL = WSKBDIO_GETSCROLL; +unsigned IOCTL_WSKBDIO_SETSCROLL = WSKBDIO_SETSCROLL; +unsigned IOCTL_WSKBDIO_SETVERSION = WSKBDIO_SETVERSION; +unsigned IOCTL_WSMOUSEIO_GTYPE = WSMOUSEIO_GTYPE; +unsigned IOCTL_WSMOUSEIO_SRES = WSMOUSEIO_SRES; +unsigned IOCTL_WSMOUSEIO_SSCALE = WSMOUSEIO_SSCALE; +unsigned IOCTL_WSMOUSEIO_SRATE = WSMOUSEIO_SRATE; +unsigned IOCTL_WSMOUSEIO_SCALIBCOORDS = WSMOUSEIO_SCALIBCOORDS; +unsigned IOCTL_WSMOUSEIO_GCALIBCOORDS = WSMOUSEIO_GCALIBCOORDS; +unsigned IOCTL_WSMOUSEIO_GETID = WSMOUSEIO_GETID; +unsigned IOCTL_WSMOUSEIO_GETREPEAT = WSMOUSEIO_GETREPEAT; +unsigned IOCTL_WSMOUSEIO_SETREPEAT = WSMOUSEIO_SETREPEAT; +unsigned IOCTL_WSMOUSEIO_SETVERSION = WSMOUSEIO_SETVERSION; +unsigned IOCTL_WSDISPLAYIO_GTYPE = WSDISPLAYIO_GTYPE; +unsigned IOCTL_WSDISPLAYIO_GINFO = WSDISPLAYIO_GINFO; +unsigned IOCTL_WSDISPLAYIO_GETCMAP = WSDISPLAYIO_GETCMAP; +unsigned IOCTL_WSDISPLAYIO_PUTCMAP = WSDISPLAYIO_PUTCMAP; +unsigned IOCTL_WSDISPLAYIO_GVIDEO = WSDISPLAYIO_GVIDEO; +unsigned IOCTL_WSDISPLAYIO_SVIDEO = WSDISPLAYIO_SVIDEO; +unsigned IOCTL_WSDISPLAYIO_GCURPOS = WSDISPLAYIO_GCURPOS; +unsigned IOCTL_WSDISPLAYIO_SCURPOS = WSDISPLAYIO_SCURPOS; +unsigned IOCTL_WSDISPLAYIO_GCURMAX = WSDISPLAYIO_GCURMAX; +unsigned IOCTL_WSDISPLAYIO_GCURSOR = WSDISPLAYIO_GCURSOR; +unsigned IOCTL_WSDISPLAYIO_SCURSOR = WSDISPLAYIO_SCURSOR; +unsigned IOCTL_WSDISPLAYIO_GMODE = WSDISPLAYIO_GMODE; +unsigned IOCTL_WSDISPLAYIO_SMODE = WSDISPLAYIO_SMODE; +unsigned IOCTL_WSDISPLAYIO_LDFONT = WSDISPLAYIO_LDFONT; +unsigned IOCTL_WSDISPLAYIO_ADDSCREEN = WSDISPLAYIO_ADDSCREEN; +unsigned IOCTL_WSDISPLAYIO_DELSCREEN = WSDISPLAYIO_DELSCREEN; +unsigned IOCTL_WSDISPLAYIO_SFONT = WSDISPLAYIO_SFONT; +unsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD = _O_WSDISPLAYIO_SETKEYBOARD; +unsigned IOCTL_WSDISPLAYIO_GETPARAM = WSDISPLAYIO_GETPARAM; +unsigned 
IOCTL_WSDISPLAYIO_SETPARAM = WSDISPLAYIO_SETPARAM; +unsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN = WSDISPLAYIO_GETACTIVESCREEN; +unsigned IOCTL_WSDISPLAYIO_GETWSCHAR = WSDISPLAYIO_GETWSCHAR; +unsigned IOCTL_WSDISPLAYIO_PUTWSCHAR = WSDISPLAYIO_PUTWSCHAR; +unsigned IOCTL_WSDISPLAYIO_DGSCROLL = WSDISPLAYIO_DGSCROLL; +unsigned IOCTL_WSDISPLAYIO_DSSCROLL = WSDISPLAYIO_DSSCROLL; +unsigned IOCTL_WSDISPLAYIO_GMSGATTRS = WSDISPLAYIO_GMSGATTRS; +unsigned IOCTL_WSDISPLAYIO_SMSGATTRS = WSDISPLAYIO_SMSGATTRS; +unsigned IOCTL_WSDISPLAYIO_GBORDER = WSDISPLAYIO_GBORDER; +unsigned IOCTL_WSDISPLAYIO_SBORDER = WSDISPLAYIO_SBORDER; +unsigned IOCTL_WSDISPLAYIO_SSPLASH = WSDISPLAYIO_SSPLASH; +unsigned IOCTL_WSDISPLAYIO_SPROGRESS = WSDISPLAYIO_SPROGRESS; +unsigned IOCTL_WSDISPLAYIO_LINEBYTES = WSDISPLAYIO_LINEBYTES; +unsigned IOCTL_WSDISPLAYIO_SETVERSION = WSDISPLAYIO_SETVERSION; +unsigned IOCTL_WSMUXIO_ADD_DEVICE = WSMUXIO_ADD_DEVICE; +unsigned IOCTL_WSMUXIO_REMOVE_DEVICE = WSMUXIO_REMOVE_DEVICE; +unsigned IOCTL_WSMUXIO_LIST_DEVICES = WSMUXIO_LIST_DEVICES; +unsigned IOCTL_WSMUXIO_INJECTEVENT = WSMUXIO_INJECTEVENT; +unsigned IOCTL_WSDISPLAYIO_GET_BUSID = WSDISPLAYIO_GET_BUSID; +unsigned IOCTL_WSDISPLAYIO_GET_EDID = WSDISPLAYIO_GET_EDID; +unsigned IOCTL_WSDISPLAYIO_SET_POLLING = WSDISPLAYIO_SET_POLLING; +unsigned IOCTL_WSDISPLAYIO_GET_FBINFO = WSDISPLAYIO_GET_FBINFO; +unsigned IOCTL_WSDISPLAYIO_DOBLIT = WSDISPLAYIO_DOBLIT; +unsigned IOCTL_WSDISPLAYIO_WAITBLIT = WSDISPLAYIO_WAITBLIT; +unsigned IOCTL_BIOCLOCATE = BIOCLOCATE; +unsigned IOCTL_BIOCINQ = BIOCINQ; +unsigned IOCTL_BIOCDISK_NOVOL = BIOCDISK_NOVOL; +unsigned IOCTL_BIOCDISK = BIOCDISK; +unsigned IOCTL_BIOCVOL = BIOCVOL; +unsigned IOCTL_BIOCALARM = BIOCALARM; +unsigned IOCTL_BIOCBLINK = BIOCBLINK; +unsigned IOCTL_BIOCSETSTATE = BIOCSETSTATE; +unsigned IOCTL_BIOCVOLOPS = BIOCVOLOPS; +unsigned IOCTL_MD_GETCONF = MD_GETCONF; +unsigned IOCTL_MD_SETCONF = MD_SETCONF; +unsigned IOCTL_CCDIOCSET = CCDIOCSET; +unsigned IOCTL_CCDIOCCLR = 
CCDIOCCLR; +unsigned IOCTL_CGDIOCSET = CGDIOCSET; +unsigned IOCTL_CGDIOCCLR = CGDIOCCLR; +unsigned IOCTL_CGDIOCGET = CGDIOCGET; +unsigned IOCTL_FSSIOCSET = FSSIOCSET; +unsigned IOCTL_FSSIOCGET = FSSIOCGET; +unsigned IOCTL_FSSIOCCLR = FSSIOCCLR; +unsigned IOCTL_FSSIOFSET = FSSIOFSET; +unsigned IOCTL_FSSIOFGET = FSSIOFGET; +unsigned IOCTL_BTDEV_ATTACH = BTDEV_ATTACH; +unsigned IOCTL_BTDEV_DETACH = BTDEV_DETACH; +unsigned IOCTL_BTSCO_GETINFO = BTSCO_GETINFO; +unsigned IOCTL_KTTCP_IO_SEND = KTTCP_IO_SEND; +unsigned IOCTL_KTTCP_IO_RECV = KTTCP_IO_RECV; +unsigned IOCTL_IOC_LOCKSTAT_GVERSION = IOC_LOCKSTAT_GVERSION; +unsigned IOCTL_IOC_LOCKSTAT_ENABLE = IOC_LOCKSTAT_ENABLE; +unsigned IOCTL_IOC_LOCKSTAT_DISABLE = IOC_LOCKSTAT_DISABLE; +unsigned IOCTL_VNDIOCSET = VNDIOCSET; +unsigned IOCTL_VNDIOCCLR = VNDIOCCLR; +unsigned IOCTL_VNDIOCGET = VNDIOCGET; +unsigned IOCTL_SPKRTONE = SPKRTONE; +unsigned IOCTL_SPKRTUNE = SPKRTUNE; +unsigned IOCTL_SPKRGETVOL = SPKRGETVOL; +unsigned IOCTL_SPKRSETVOL = SPKRSETVOL; +unsigned IOCTL_BIOCGBLEN = BIOCGBLEN; +unsigned IOCTL_BIOCSBLEN = BIOCSBLEN; +unsigned IOCTL_BIOCSETF = BIOCSETF; +unsigned IOCTL_BIOCFLUSH = BIOCFLUSH; +unsigned IOCTL_BIOCPROMISC = BIOCPROMISC; +unsigned IOCTL_BIOCGDLT = BIOCGDLT; +unsigned IOCTL_BIOCGETIF = BIOCGETIF; +unsigned IOCTL_BIOCSETIF = BIOCSETIF; +unsigned IOCTL_BIOCGSTATS = BIOCGSTATS; +unsigned IOCTL_BIOCGSTATSOLD = BIOCGSTATSOLD; +unsigned IOCTL_BIOCIMMEDIATE = BIOCIMMEDIATE; +unsigned IOCTL_BIOCVERSION = BIOCVERSION; +unsigned IOCTL_BIOCSTCPF = BIOCSTCPF; +unsigned IOCTL_BIOCSUDPF = BIOCSUDPF; +unsigned IOCTL_BIOCGHDRCMPLT = BIOCGHDRCMPLT; +unsigned IOCTL_BIOCSHDRCMPLT = BIOCSHDRCMPLT; +unsigned IOCTL_BIOCSDLT = BIOCSDLT; +unsigned IOCTL_BIOCGDLTLIST = BIOCGDLTLIST; +unsigned IOCTL_BIOCGSEESENT = BIOCGSEESENT; +unsigned IOCTL_BIOCSSEESENT = BIOCSSEESENT; +unsigned IOCTL_BIOCSRTIMEOUT = BIOCSRTIMEOUT; +unsigned IOCTL_BIOCGRTIMEOUT = BIOCGRTIMEOUT; +unsigned IOCTL_BIOCGFEEDBACK = BIOCGFEEDBACK; +unsigned 
IOCTL_BIOCSFEEDBACK = BIOCSFEEDBACK; +unsigned IOCTL_SIOCRAWATM = SIOCRAWATM; +unsigned IOCTL_SIOCATMENA = SIOCATMENA; +unsigned IOCTL_SIOCATMDIS = SIOCATMDIS; +unsigned IOCTL_SIOCSPVCTX = SIOCSPVCTX; +unsigned IOCTL_SIOCGPVCTX = SIOCGPVCTX; +unsigned IOCTL_SIOCSPVCSIF = SIOCSPVCSIF; +unsigned IOCTL_SIOCGPVCSIF = SIOCGPVCSIF; +unsigned IOCTL_GRESADDRS = GRESADDRS; +unsigned IOCTL_GRESADDRD = GRESADDRD; +unsigned IOCTL_GREGADDRS = GREGADDRS; +unsigned IOCTL_GREGADDRD = GREGADDRD; +unsigned IOCTL_GRESPROTO = GRESPROTO; +unsigned IOCTL_GREGPROTO = GREGPROTO; +unsigned IOCTL_GRESSOCK = GRESSOCK; +unsigned IOCTL_GREDSOCK = GREDSOCK; +unsigned IOCTL_PPPIOCGRAWIN = PPPIOCGRAWIN; +unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS; +unsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS; +unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP; +unsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP; +unsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT; +unsigned IOCTL_PPPIOCGRASYNCMAP = PPPIOCGRASYNCMAP; +unsigned IOCTL_PPPIOCSRASYNCMAP = PPPIOCSRASYNCMAP; +unsigned IOCTL_PPPIOCGMRU = PPPIOCGMRU; +unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU; +unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID; +unsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP; +unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP; +unsigned IOCTL_PPPIOCXFERUNIT = PPPIOCXFERUNIT; +unsigned IOCTL_PPPIOCSCOMPRESS = PPPIOCSCOMPRESS; +unsigned IOCTL_PPPIOCGNPMODE = PPPIOCGNPMODE; +unsigned IOCTL_PPPIOCSNPMODE = PPPIOCSNPMODE; +unsigned IOCTL_PPPIOCGIDLE = PPPIOCGIDLE; +unsigned IOCTL_PPPIOCGMTU = PPPIOCGMTU; +unsigned IOCTL_PPPIOCSMTU = PPPIOCSMTU; +unsigned IOCTL_SIOCGPPPSTATS = SIOCGPPPSTATS; +unsigned IOCTL_SIOCGPPPCSTATS = SIOCGPPPCSTATS; +unsigned IOCTL_IOC_NPF_VERSION = IOC_NPF_VERSION; +unsigned IOCTL_IOC_NPF_SWITCH = IOC_NPF_SWITCH; +unsigned IOCTL_IOC_NPF_LOAD = IOC_NPF_LOAD; +unsigned IOCTL_IOC_NPF_TABLE = IOC_NPF_TABLE; +unsigned IOCTL_IOC_NPF_STATS = IOC_NPF_STATS; +unsigned IOCTL_IOC_NPF_SAVE = IOC_NPF_SAVE; +unsigned IOCTL_IOC_NPF_RULE = 
IOC_NPF_RULE; +unsigned IOCTL_IOC_NPF_CONN_LOOKUP = IOC_NPF_CONN_LOOKUP; +unsigned IOCTL_PPPOESETPARMS = PPPOESETPARMS; +unsigned IOCTL_PPPOEGETPARMS = PPPOEGETPARMS; +unsigned IOCTL_PPPOEGETSESSION = PPPOEGETSESSION; +unsigned IOCTL_SPPPGETAUTHCFG = SPPPGETAUTHCFG; +unsigned IOCTL_SPPPSETAUTHCFG = SPPPSETAUTHCFG; +unsigned IOCTL_SPPPGETLCPCFG = SPPPGETLCPCFG; +unsigned IOCTL_SPPPSETLCPCFG = SPPPSETLCPCFG; +unsigned IOCTL_SPPPGETSTATUS = SPPPGETSTATUS; +unsigned IOCTL_SPPPGETSTATUSNCP = SPPPGETSTATUSNCP; +unsigned IOCTL_SPPPGETIDLETO = SPPPGETIDLETO; +unsigned IOCTL_SPPPSETIDLETO = SPPPSETIDLETO; +unsigned IOCTL_SPPPGETAUTHFAILURES = SPPPGETAUTHFAILURES; +unsigned IOCTL_SPPPSETAUTHFAILURE = SPPPSETAUTHFAILURE; +unsigned IOCTL_SPPPSETDNSOPTS = SPPPSETDNSOPTS; +unsigned IOCTL_SPPPGETDNSOPTS = SPPPGETDNSOPTS; +unsigned IOCTL_SPPPGETDNSADDRS = SPPPGETDNSADDRS; +unsigned IOCTL_SPPPSETKEEPALIVE = SPPPSETKEEPALIVE; +unsigned IOCTL_SPPPGETKEEPALIVE = SPPPGETKEEPALIVE; +unsigned IOCTL_SRT_GETNRT = SRT_GETNRT; +unsigned IOCTL_SRT_GETRT = SRT_GETRT; +unsigned IOCTL_SRT_SETRT = SRT_SETRT; +unsigned IOCTL_SRT_DELRT = SRT_DELRT; +unsigned IOCTL_SRT_SFLAGS = SRT_SFLAGS; +unsigned IOCTL_SRT_GFLAGS = SRT_GFLAGS; +unsigned IOCTL_SRT_SGFLAGS = SRT_SGFLAGS; +unsigned IOCTL_SRT_DEBUG = SRT_DEBUG; +unsigned IOCTL_TAPGIFNAME = TAPGIFNAME; +unsigned IOCTL_TUNSDEBUG = TUNSDEBUG; +unsigned IOCTL_TUNGDEBUG = TUNGDEBUG; +unsigned IOCTL_TUNSIFMODE = TUNSIFMODE; +unsigned IOCTL_TUNSLMODE = TUNSLMODE; +unsigned IOCTL_TUNSIFHEAD = TUNSIFHEAD; +unsigned IOCTL_TUNGIFHEAD = TUNGIFHEAD; +unsigned IOCTL_DIOCSTART = DIOCSTART; +unsigned IOCTL_DIOCSTOP = DIOCSTOP; +unsigned IOCTL_DIOCADDRULE = DIOCADDRULE; +unsigned IOCTL_DIOCGETRULES = DIOCGETRULES; +unsigned IOCTL_DIOCGETRULE = DIOCGETRULE; +unsigned IOCTL_DIOCSETLCK = DIOCSETLCK; +unsigned IOCTL_DIOCCLRSTATES = DIOCCLRSTATES; +unsigned IOCTL_DIOCGETSTATE = DIOCGETSTATE; +unsigned IOCTL_DIOCSETSTATUSIF = DIOCSETSTATUSIF; +unsigned IOCTL_DIOCGETSTATUS 
= DIOCGETSTATUS; +unsigned IOCTL_DIOCCLRSTATUS = DIOCCLRSTATUS; +unsigned IOCTL_DIOCNATLOOK = DIOCNATLOOK; +unsigned IOCTL_DIOCSETDEBUG = DIOCSETDEBUG; +unsigned IOCTL_DIOCGETSTATES = DIOCGETSTATES; +unsigned IOCTL_DIOCCHANGERULE = DIOCCHANGERULE; +unsigned IOCTL_DIOCSETTIMEOUT = DIOCSETTIMEOUT; +unsigned IOCTL_DIOCGETTIMEOUT = DIOCGETTIMEOUT; +unsigned IOCTL_DIOCADDSTATE = DIOCADDSTATE; +unsigned IOCTL_DIOCCLRRULECTRS = DIOCCLRRULECTRS; +unsigned IOCTL_DIOCGETLIMIT = DIOCGETLIMIT; +unsigned IOCTL_DIOCSETLIMIT = DIOCSETLIMIT; +unsigned IOCTL_DIOCKILLSTATES = DIOCKILLSTATES; +unsigned IOCTL_DIOCSTARTALTQ = DIOCSTARTALTQ; +unsigned IOCTL_DIOCSTOPALTQ = DIOCSTOPALTQ; +unsigned IOCTL_DIOCADDALTQ = DIOCADDALTQ; +unsigned IOCTL_DIOCGETALTQS = DIOCGETALTQS; +unsigned IOCTL_DIOCGETALTQ = DIOCGETALTQ; +unsigned IOCTL_DIOCCHANGEALTQ = DIOCCHANGEALTQ; +unsigned IOCTL_DIOCGETQSTATS = DIOCGETQSTATS; +unsigned IOCTL_DIOCBEGINADDRS = DIOCBEGINADDRS; +unsigned IOCTL_DIOCADDADDR = DIOCADDADDR; +unsigned IOCTL_DIOCGETADDRS = DIOCGETADDRS; +unsigned IOCTL_DIOCGETADDR = DIOCGETADDR; +unsigned IOCTL_DIOCCHANGEADDR = DIOCCHANGEADDR; +unsigned IOCTL_DIOCADDSTATES = DIOCADDSTATES; +unsigned IOCTL_DIOCGETRULESETS = DIOCGETRULESETS; +unsigned IOCTL_DIOCGETRULESET = DIOCGETRULESET; +unsigned IOCTL_DIOCRCLRTABLES = DIOCRCLRTABLES; +unsigned IOCTL_DIOCRADDTABLES = DIOCRADDTABLES; +unsigned IOCTL_DIOCRDELTABLES = DIOCRDELTABLES; +unsigned IOCTL_DIOCRGETTABLES = DIOCRGETTABLES; +unsigned IOCTL_DIOCRGETTSTATS = DIOCRGETTSTATS; +unsigned IOCTL_DIOCRCLRTSTATS = DIOCRCLRTSTATS; +unsigned IOCTL_DIOCRCLRADDRS = DIOCRCLRADDRS; +unsigned IOCTL_DIOCRADDADDRS = DIOCRADDADDRS; +unsigned IOCTL_DIOCRDELADDRS = DIOCRDELADDRS; +unsigned IOCTL_DIOCRSETADDRS = DIOCRSETADDRS; +unsigned IOCTL_DIOCRGETADDRS = DIOCRGETADDRS; +unsigned IOCTL_DIOCRGETASTATS = DIOCRGETASTATS; +unsigned IOCTL_DIOCRCLRASTATS = DIOCRCLRASTATS; +unsigned IOCTL_DIOCRTSTADDRS = DIOCRTSTADDRS; +unsigned IOCTL_DIOCRSETTFLAGS = DIOCRSETTFLAGS; 
+unsigned IOCTL_DIOCRINADEFINE = DIOCRINADEFINE; +unsigned IOCTL_DIOCOSFPFLUSH = DIOCOSFPFLUSH; +unsigned IOCTL_DIOCOSFPADD = DIOCOSFPADD; +unsigned IOCTL_DIOCOSFPGET = DIOCOSFPGET; +unsigned IOCTL_DIOCXBEGIN = DIOCXBEGIN; +unsigned IOCTL_DIOCXCOMMIT = DIOCXCOMMIT; +unsigned IOCTL_DIOCXROLLBACK = DIOCXROLLBACK; +unsigned IOCTL_DIOCGETSRCNODES = DIOCGETSRCNODES; +unsigned IOCTL_DIOCCLRSRCNODES = DIOCCLRSRCNODES; +unsigned IOCTL_DIOCSETHOSTID = DIOCSETHOSTID; +unsigned IOCTL_DIOCIGETIFACES = DIOCIGETIFACES; +unsigned IOCTL_DIOCSETIFFLAG = DIOCSETIFFLAG; +unsigned IOCTL_DIOCCLRIFFLAG = DIOCCLRIFFLAG; +unsigned IOCTL_DIOCKILLSRCNODES = DIOCKILLSRCNODES; +unsigned IOCTL_SLIOCGUNIT = SLIOCGUNIT; +unsigned IOCTL_SIOCGBTINFO = SIOCGBTINFO; +unsigned IOCTL_SIOCGBTINFOA = SIOCGBTINFOA; +unsigned IOCTL_SIOCNBTINFO = SIOCNBTINFO; +unsigned IOCTL_SIOCSBTFLAGS = SIOCSBTFLAGS; +unsigned IOCTL_SIOCSBTPOLICY = SIOCSBTPOLICY; +unsigned IOCTL_SIOCSBTPTYPE = SIOCSBTPTYPE; +unsigned IOCTL_SIOCGBTSTATS = SIOCGBTSTATS; +unsigned IOCTL_SIOCZBTSTATS = SIOCZBTSTATS; +unsigned IOCTL_SIOCBTDUMP = SIOCBTDUMP; +unsigned IOCTL_SIOCSBTSCOMTU = SIOCSBTSCOMTU; +unsigned IOCTL_SIOCGBTFEAT = SIOCGBTFEAT; +unsigned IOCTL_SIOCADNAT = SIOCADNAT; +unsigned IOCTL_SIOCRMNAT = SIOCRMNAT; +unsigned IOCTL_SIOCGNATS = SIOCGNATS; +unsigned IOCTL_SIOCGNATL = SIOCGNATL; +unsigned IOCTL_SIOCPURGENAT = SIOCPURGENAT; +unsigned IOCTL_SIOCSIFINFO_FLAGS = SIOCSIFINFO_FLAGS; +unsigned IOCTL_SIOCAADDRCTL_POLICY = SIOCAADDRCTL_POLICY; +unsigned IOCTL_SIOCDADDRCTL_POLICY = SIOCDADDRCTL_POLICY; +unsigned IOCTL_SMBIOC_OPENSESSION = SMBIOC_OPENSESSION; +unsigned IOCTL_SMBIOC_OPENSHARE = SMBIOC_OPENSHARE; +unsigned IOCTL_SMBIOC_REQUEST = SMBIOC_REQUEST; +unsigned IOCTL_SMBIOC_SETFLAGS = SMBIOC_SETFLAGS; +unsigned IOCTL_SMBIOC_LOOKUP = SMBIOC_LOOKUP; +unsigned IOCTL_SMBIOC_READ = SMBIOC_READ; +unsigned IOCTL_SMBIOC_WRITE = SMBIOC_WRITE; +unsigned IOCTL_AGPIOC_INFO = AGPIOC_INFO; +unsigned IOCTL_AGPIOC_ACQUIRE = AGPIOC_ACQUIRE; 
+unsigned IOCTL_AGPIOC_RELEASE = AGPIOC_RELEASE; +unsigned IOCTL_AGPIOC_SETUP = AGPIOC_SETUP; +unsigned IOCTL_AGPIOC_ALLOCATE = AGPIOC_ALLOCATE; +unsigned IOCTL_AGPIOC_DEALLOCATE = AGPIOC_DEALLOCATE; +unsigned IOCTL_AGPIOC_BIND = AGPIOC_BIND; +unsigned IOCTL_AGPIOC_UNBIND = AGPIOC_UNBIND; +unsigned IOCTL_AUDIO_GETINFO = AUDIO_GETINFO; +unsigned IOCTL_AUDIO_SETINFO = AUDIO_SETINFO; +unsigned IOCTL_AUDIO_DRAIN = AUDIO_DRAIN; +unsigned IOCTL_AUDIO_FLUSH = AUDIO_FLUSH; +unsigned IOCTL_AUDIO_WSEEK = AUDIO_WSEEK; +unsigned IOCTL_AUDIO_RERROR = AUDIO_RERROR; +unsigned IOCTL_AUDIO_GETDEV = AUDIO_GETDEV; +unsigned IOCTL_AUDIO_GETENC = AUDIO_GETENC; +unsigned IOCTL_AUDIO_GETFD = AUDIO_GETFD; +unsigned IOCTL_AUDIO_SETFD = AUDIO_SETFD; +unsigned IOCTL_AUDIO_PERROR = AUDIO_PERROR; +unsigned IOCTL_AUDIO_GETIOFFS = AUDIO_GETIOFFS; +unsigned IOCTL_AUDIO_GETOOFFS = AUDIO_GETOOFFS; +unsigned IOCTL_AUDIO_GETPROPS = AUDIO_GETPROPS; +unsigned IOCTL_AUDIO_GETBUFINFO = AUDIO_GETBUFINFO; +unsigned IOCTL_AUDIO_SETCHAN = AUDIO_SETCHAN; +unsigned IOCTL_AUDIO_GETCHAN = AUDIO_GETCHAN; +unsigned IOCTL_AUDIO_MIXER_READ = AUDIO_MIXER_READ; +unsigned IOCTL_AUDIO_MIXER_WRITE = AUDIO_MIXER_WRITE; +unsigned IOCTL_AUDIO_MIXER_DEVINFO = AUDIO_MIXER_DEVINFO; +unsigned IOCTL_ATAIOCCOMMAND = ATAIOCCOMMAND; +unsigned IOCTL_ATABUSIOSCAN = ATABUSIOSCAN; +unsigned IOCTL_ATABUSIORESET = ATABUSIORESET; +unsigned IOCTL_ATABUSIODETACH = ATABUSIODETACH; +unsigned IOCTL_CDIOCPLAYTRACKS = CDIOCPLAYTRACKS; +unsigned IOCTL_CDIOCPLAYBLOCKS = CDIOCPLAYBLOCKS; +unsigned IOCTL_CDIOCREADSUBCHANNEL = CDIOCREADSUBCHANNEL; +unsigned IOCTL_CDIOREADTOCHEADER = CDIOREADTOCHEADER; +unsigned IOCTL_CDIOREADTOCENTRIES = CDIOREADTOCENTRIES; +unsigned IOCTL_CDIOREADMSADDR = CDIOREADMSADDR; +unsigned IOCTL_CDIOCSETPATCH = CDIOCSETPATCH; +unsigned IOCTL_CDIOCGETVOL = CDIOCGETVOL; +unsigned IOCTL_CDIOCSETVOL = CDIOCSETVOL; +unsigned IOCTL_CDIOCSETMONO = CDIOCSETMONO; +unsigned IOCTL_CDIOCSETSTEREO = CDIOCSETSTEREO; +unsigned 
IOCTL_CDIOCSETMUTE = CDIOCSETMUTE; +unsigned IOCTL_CDIOCSETLEFT = CDIOCSETLEFT; +unsigned IOCTL_CDIOCSETRIGHT = CDIOCSETRIGHT; +unsigned IOCTL_CDIOCSETDEBUG = CDIOCSETDEBUG; +unsigned IOCTL_CDIOCCLRDEBUG = CDIOCCLRDEBUG; +unsigned IOCTL_CDIOCPAUSE = CDIOCPAUSE; +unsigned IOCTL_CDIOCRESUME = CDIOCRESUME; +unsigned IOCTL_CDIOCRESET = CDIOCRESET; +unsigned IOCTL_CDIOCSTART = CDIOCSTART; +unsigned IOCTL_CDIOCSTOP = CDIOCSTOP; +unsigned IOCTL_CDIOCEJECT = CDIOCEJECT; +unsigned IOCTL_CDIOCALLOW = CDIOCALLOW; +unsigned IOCTL_CDIOCPREVENT = CDIOCPREVENT; +unsigned IOCTL_CDIOCCLOSE = CDIOCCLOSE; +unsigned IOCTL_CDIOCPLAYMSF = CDIOCPLAYMSF; +unsigned IOCTL_CDIOCLOADUNLOAD = CDIOCLOADUNLOAD; +unsigned IOCTL_CHIOMOVE = CHIOMOVE; +unsigned IOCTL_CHIOEXCHANGE = CHIOEXCHANGE; +unsigned IOCTL_CHIOPOSITION = CHIOPOSITION; +unsigned IOCTL_CHIOGPICKER = CHIOGPICKER; +unsigned IOCTL_CHIOSPICKER = CHIOSPICKER; +unsigned IOCTL_CHIOGPARAMS = CHIOGPARAMS; +unsigned IOCTL_CHIOIELEM = CHIOIELEM; +unsigned IOCTL_OCHIOGSTATUS = OCHIOGSTATUS; +unsigned IOCTL_CHIOGSTATUS = CHIOGSTATUS; +unsigned IOCTL_CHIOSVOLTAG = CHIOSVOLTAG; +unsigned IOCTL_CLOCKCTL_SETTIMEOFDAY = CLOCKCTL_SETTIMEOFDAY; +unsigned IOCTL_CLOCKCTL_ADJTIME = CLOCKCTL_ADJTIME; +unsigned IOCTL_CLOCKCTL_CLOCK_SETTIME = CLOCKCTL_CLOCK_SETTIME; +unsigned IOCTL_CLOCKCTL_NTP_ADJTIME = CLOCKCTL_NTP_ADJTIME; +unsigned IOCTL_IOC_CPU_SETSTATE = IOC_CPU_SETSTATE; +unsigned IOCTL_IOC_CPU_GETSTATE = IOC_CPU_GETSTATE; +unsigned IOCTL_IOC_CPU_GETCOUNT = IOC_CPU_GETCOUNT; +unsigned IOCTL_IOC_CPU_MAPID = IOC_CPU_MAPID; +unsigned IOCTL_IOC_CPU_UCODE_GET_VERSION = IOC_CPU_UCODE_GET_VERSION; +unsigned IOCTL_IOC_CPU_UCODE_APPLY = IOC_CPU_UCODE_APPLY; +unsigned IOCTL_DIOCGDINFO = DIOCGDINFO; +unsigned IOCTL_DIOCSDINFO = DIOCSDINFO; +unsigned IOCTL_DIOCWDINFO = DIOCWDINFO; +unsigned IOCTL_DIOCRFORMAT = DIOCRFORMAT; +unsigned IOCTL_DIOCWFORMAT = DIOCWFORMAT; +unsigned IOCTL_DIOCSSTEP = DIOCSSTEP; +unsigned IOCTL_DIOCSRETRIES = DIOCSRETRIES; +unsigned 
IOCTL_DIOCKLABEL = DIOCKLABEL; +unsigned IOCTL_DIOCWLABEL = DIOCWLABEL; +unsigned IOCTL_DIOCSBAD = DIOCSBAD; +unsigned IOCTL_DIOCEJECT = DIOCEJECT; +unsigned IOCTL_ODIOCEJECT = ODIOCEJECT; +unsigned IOCTL_DIOCLOCK = DIOCLOCK; +unsigned IOCTL_DIOCGDEFLABEL = DIOCGDEFLABEL; +unsigned IOCTL_DIOCCLRLABEL = DIOCCLRLABEL; +unsigned IOCTL_DIOCGCACHE = DIOCGCACHE; +unsigned IOCTL_DIOCSCACHE = DIOCSCACHE; +unsigned IOCTL_DIOCCACHESYNC = DIOCCACHESYNC; +unsigned IOCTL_DIOCBSLIST = DIOCBSLIST; +unsigned IOCTL_DIOCBSFLUSH = DIOCBSFLUSH; +unsigned IOCTL_DIOCAWEDGE = DIOCAWEDGE; +unsigned IOCTL_DIOCGWEDGEINFO = DIOCGWEDGEINFO; +unsigned IOCTL_DIOCDWEDGE = DIOCDWEDGE; +unsigned IOCTL_DIOCLWEDGES = DIOCLWEDGES; +unsigned IOCTL_DIOCGSTRATEGY = DIOCGSTRATEGY; +unsigned IOCTL_DIOCSSTRATEGY = DIOCSSTRATEGY; +unsigned IOCTL_DIOCGDISKINFO = DIOCGDISKINFO; +unsigned IOCTL_DIOCTUR = DIOCTUR; +unsigned IOCTL_DIOCMWEDGES = DIOCMWEDGES; +unsigned IOCTL_DIOCGSECTORSIZE = DIOCGSECTORSIZE; +unsigned IOCTL_DIOCGMEDIASIZE = DIOCGMEDIASIZE; +unsigned IOCTL_DRVDETACHDEV = DRVDETACHDEV; +unsigned IOCTL_DRVRESCANBUS = DRVRESCANBUS; +unsigned IOCTL_DRVCTLCOMMAND = DRVCTLCOMMAND; +unsigned IOCTL_DRVRESUMEDEV = DRVRESUMEDEV; +unsigned IOCTL_DRVLISTDEV = DRVLISTDEV; +unsigned IOCTL_DRVGETEVENT = DRVGETEVENT; +unsigned IOCTL_DRVSUSPENDDEV = DRVSUSPENDDEV; +unsigned IOCTL_DVD_READ_STRUCT = DVD_READ_STRUCT; +unsigned IOCTL_DVD_WRITE_STRUCT = DVD_WRITE_STRUCT; +unsigned IOCTL_DVD_AUTH = DVD_AUTH; +unsigned IOCTL_ENVSYS_GETDICTIONARY = ENVSYS_GETDICTIONARY; +unsigned IOCTL_ENVSYS_SETDICTIONARY = ENVSYS_SETDICTIONARY; +unsigned IOCTL_ENVSYS_REMOVEPROPS = ENVSYS_REMOVEPROPS; +unsigned IOCTL_ENVSYS_GTREDATA = ENVSYS_GTREDATA; +unsigned IOCTL_ENVSYS_GTREINFO = ENVSYS_GTREINFO; +unsigned IOCTL_KFILTER_BYFILTER = KFILTER_BYFILTER; +unsigned IOCTL_KFILTER_BYNAME = KFILTER_BYNAME; +unsigned IOCTL_FDIOCGETOPTS = FDIOCGETOPTS; +unsigned IOCTL_FDIOCSETOPTS = FDIOCSETOPTS; +unsigned IOCTL_FDIOCSETFORMAT = FDIOCSETFORMAT; 
+unsigned IOCTL_FDIOCGETFORMAT = FDIOCGETFORMAT; +unsigned IOCTL_FDIOCFORMAT_TRACK = FDIOCFORMAT_TRACK; unsigned IOCTL_FIOCLEX = FIOCLEX; -unsigned IOCTL_FIOGETOWN = FIOGETOWN; -unsigned IOCTL_FIONBIO = FIONBIO; unsigned IOCTL_FIONCLEX = FIONCLEX; +unsigned IOCTL_FIONREAD = FIONREAD; +unsigned IOCTL_FIONBIO = FIONBIO; +unsigned IOCTL_FIOASYNC = FIOASYNC; unsigned IOCTL_FIOSETOWN = FIOSETOWN; -unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI; +unsigned IOCTL_FIOGETOWN = FIOGETOWN; +unsigned IOCTL_OFIOGETBMAP = OFIOGETBMAP; +unsigned IOCTL_FIOGETBMAP = FIOGETBMAP; +unsigned IOCTL_FIONWRITE = FIONWRITE; +unsigned IOCTL_FIONSPACE = FIONSPACE; +unsigned IOCTL_GPIOINFO = GPIOINFO; +unsigned IOCTL_GPIOSET = GPIOSET; +unsigned IOCTL_GPIOUNSET = GPIOUNSET; +unsigned IOCTL_GPIOREAD = GPIOREAD; +unsigned IOCTL_GPIOWRITE = GPIOWRITE; +unsigned IOCTL_GPIOTOGGLE = GPIOTOGGLE; +unsigned IOCTL_GPIOATTACH = GPIOATTACH; +unsigned IOCTL_PTIOCNETBSD = PTIOCNETBSD; +unsigned IOCTL_PTIOCSUNOS = PTIOCSUNOS; +unsigned IOCTL_PTIOCLINUX = PTIOCLINUX; +unsigned IOCTL_PTIOCFREEBSD = PTIOCFREEBSD; +unsigned IOCTL_PTIOCULTRIX = PTIOCULTRIX; +unsigned IOCTL_TIOCHPCL = TIOCHPCL; +unsigned IOCTL_TIOCGETP = TIOCGETP; +unsigned IOCTL_TIOCSETP = TIOCSETP; +unsigned IOCTL_TIOCSETN = TIOCSETN; +unsigned IOCTL_TIOCSETC = TIOCSETC; +unsigned IOCTL_TIOCGETC = TIOCGETC; +unsigned IOCTL_TIOCLBIS = TIOCLBIS; +unsigned IOCTL_TIOCLBIC = TIOCLBIC; +unsigned IOCTL_TIOCLSET = TIOCLSET; +unsigned IOCTL_TIOCLGET = TIOCLGET; +unsigned IOCTL_TIOCSLTC = TIOCSLTC; +unsigned IOCTL_TIOCGLTC = TIOCGLTC; +unsigned IOCTL_OTIOCCONS = OTIOCCONS; +unsigned IOCTL_JOY_SETTIMEOUT = JOY_SETTIMEOUT; +unsigned IOCTL_JOY_GETTIMEOUT = JOY_GETTIMEOUT; +unsigned IOCTL_JOY_SET_X_OFFSET = JOY_SET_X_OFFSET; +unsigned IOCTL_JOY_SET_Y_OFFSET = JOY_SET_Y_OFFSET; +unsigned IOCTL_JOY_GET_X_OFFSET = JOY_GET_X_OFFSET; +unsigned IOCTL_JOY_GET_Y_OFFSET = JOY_GET_Y_OFFSET; +unsigned IOCTL_OKIOCGSYMBOL = OKIOCGSYMBOL; +unsigned IOCTL_OKIOCGVALUE = 
OKIOCGVALUE; +unsigned IOCTL_KIOCGSIZE = KIOCGSIZE; +unsigned IOCTL_KIOCGVALUE = KIOCGVALUE; +unsigned IOCTL_KIOCGSYMBOL = KIOCGSYMBOL; +unsigned IOCTL_LUAINFO = LUAINFO; +unsigned IOCTL_LUACREATE = LUACREATE; +unsigned IOCTL_LUADESTROY = LUADESTROY; +unsigned IOCTL_LUAREQUIRE = LUAREQUIRE; +unsigned IOCTL_LUALOAD = LUALOAD; +unsigned IOCTL_MIDI_PRETIME = MIDI_PRETIME; +unsigned IOCTL_MIDI_MPUMODE = MIDI_MPUMODE; +unsigned IOCTL_MIDI_MPUCMD = MIDI_MPUCMD; +unsigned IOCTL_SEQUENCER_RESET = SEQUENCER_RESET; +unsigned IOCTL_SEQUENCER_SYNC = SEQUENCER_SYNC; +unsigned IOCTL_SEQUENCER_INFO = SEQUENCER_INFO; +unsigned IOCTL_SEQUENCER_CTRLRATE = SEQUENCER_CTRLRATE; +unsigned IOCTL_SEQUENCER_GETOUTCOUNT = SEQUENCER_GETOUTCOUNT; +unsigned IOCTL_SEQUENCER_GETINCOUNT = SEQUENCER_GETINCOUNT; +unsigned IOCTL_SEQUENCER_RESETSAMPLES = SEQUENCER_RESETSAMPLES; +unsigned IOCTL_SEQUENCER_NRSYNTHS = SEQUENCER_NRSYNTHS; +unsigned IOCTL_SEQUENCER_NRMIDIS = SEQUENCER_NRMIDIS; +unsigned IOCTL_SEQUENCER_THRESHOLD = SEQUENCER_THRESHOLD; +unsigned IOCTL_SEQUENCER_MEMAVL = SEQUENCER_MEMAVL; +unsigned IOCTL_SEQUENCER_PANIC = SEQUENCER_PANIC; +unsigned IOCTL_SEQUENCER_OUTOFBAND = SEQUENCER_OUTOFBAND; +unsigned IOCTL_SEQUENCER_GETTIME = SEQUENCER_GETTIME; +unsigned IOCTL_SEQUENCER_TMR_TIMEBASE = SEQUENCER_TMR_TIMEBASE; +unsigned IOCTL_SEQUENCER_TMR_START = SEQUENCER_TMR_START; +unsigned IOCTL_SEQUENCER_TMR_STOP = SEQUENCER_TMR_STOP; +unsigned IOCTL_SEQUENCER_TMR_CONTINUE = SEQUENCER_TMR_CONTINUE; +unsigned IOCTL_SEQUENCER_TMR_TEMPO = SEQUENCER_TMR_TEMPO; +unsigned IOCTL_SEQUENCER_TMR_SOURCE = SEQUENCER_TMR_SOURCE; +unsigned IOCTL_SEQUENCER_TMR_METRONOME = SEQUENCER_TMR_METRONOME; +unsigned IOCTL_SEQUENCER_TMR_SELECT = SEQUENCER_TMR_SELECT; +unsigned IOCTL_MTIOCTOP = MTIOCTOP; +unsigned IOCTL_MTIOCGET = MTIOCGET; +unsigned IOCTL_MTIOCIEOT = MTIOCIEOT; +unsigned IOCTL_MTIOCEEOT = MTIOCEEOT; +unsigned IOCTL_MTIOCRDSPOS = MTIOCRDSPOS; +unsigned IOCTL_MTIOCRDHPOS = MTIOCRDHPOS; +unsigned 
IOCTL_MTIOCSLOCATE = MTIOCSLOCATE; +unsigned IOCTL_MTIOCHLOCATE = MTIOCHLOCATE; +unsigned IOCTL_POWER_EVENT_RECVDICT = POWER_EVENT_RECVDICT; +unsigned IOCTL_POWER_IOC_GET_TYPE = POWER_IOC_GET_TYPE; +unsigned IOCTL_POWER_IOC_GET_TYPE_WITH_LOSSAGE = + POWER_IOC_GET_TYPE_WITH_LOSSAGE; +unsigned IOCTL_RIOCGINFO = RIOCGINFO; +unsigned IOCTL_RIOCSINFO = RIOCSINFO; +unsigned IOCTL_RIOCSSRCH = RIOCSSRCH; +unsigned IOCTL_RNDGETENTCNT = RNDGETENTCNT; +unsigned IOCTL_RNDGETSRCNUM = RNDGETSRCNUM; +unsigned IOCTL_RNDGETSRCNAME = RNDGETSRCNAME; +unsigned IOCTL_RNDCTL = RNDCTL; +unsigned IOCTL_RNDADDDATA = RNDADDDATA; +unsigned IOCTL_RNDGETPOOLSTAT = RNDGETPOOLSTAT; +unsigned IOCTL_RNDGETESTNUM = RNDGETESTNUM; +unsigned IOCTL_RNDGETESTNAME = RNDGETESTNAME; +unsigned IOCTL_SCIOCGET = SCIOCGET; +unsigned IOCTL_SCIOCSET = SCIOCSET; +unsigned IOCTL_SCIOCRESTART = SCIOCRESTART; +unsigned IOCTL_SCIOC_USE_ADF = SCIOC_USE_ADF; +unsigned IOCTL_SCIOCCOMMAND = SCIOCCOMMAND; +unsigned IOCTL_SCIOCDEBUG = SCIOCDEBUG; +unsigned IOCTL_SCIOCIDENTIFY = SCIOCIDENTIFY; +unsigned IOCTL_OSCIOCIDENTIFY = OSCIOCIDENTIFY; +unsigned IOCTL_SCIOCDECONFIG = SCIOCDECONFIG; +unsigned IOCTL_SCIOCRECONFIG = SCIOCRECONFIG; +unsigned IOCTL_SCIOCRESET = SCIOCRESET; +unsigned IOCTL_SCBUSIOSCAN = SCBUSIOSCAN; +unsigned IOCTL_SCBUSIORESET = SCBUSIORESET; +unsigned IOCTL_SCBUSIODETACH = SCBUSIODETACH; +unsigned IOCTL_SCBUSACCEL = SCBUSACCEL; +unsigned IOCTL_SCBUSIOLLSCAN = SCBUSIOLLSCAN; +unsigned IOCTL_SIOCSHIWAT = SIOCSHIWAT; +unsigned IOCTL_SIOCGHIWAT = SIOCGHIWAT; +unsigned IOCTL_SIOCSLOWAT = SIOCSLOWAT; +unsigned IOCTL_SIOCGLOWAT = SIOCGLOWAT; unsigned IOCTL_SIOCATMARK = SIOCATMARK; -unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI; -unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR; -unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR; -unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF; -unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR; -unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS; -unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC; -unsigned 
IOCTL_SIOCGIFMTU = SIOCGIFMTU; -unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK; +unsigned IOCTL_SIOCSPGRP = SIOCSPGRP; unsigned IOCTL_SIOCGPGRP = SIOCGPGRP; +unsigned IOCTL_SIOCADDRT = SIOCADDRT; +unsigned IOCTL_SIOCDELRT = SIOCDELRT; unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR; -unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR; +unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR; unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR; +unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR; unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS; +unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS; +unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR; +unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR; +unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF; +unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK; +unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK; +unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC; unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC; +unsigned IOCTL_SIOCDIFADDR = SIOCDIFADDR; +unsigned IOCTL_SIOCAIFADDR = SIOCAIFADDR; +unsigned IOCTL_SIOCGIFALIAS = SIOCGIFALIAS; +unsigned IOCTL_SIOCGIFAFLAG_IN = SIOCGIFAFLAG_IN; +unsigned IOCTL_SIOCALIFADDR = SIOCALIFADDR; +unsigned IOCTL_SIOCGLIFADDR = SIOCGLIFADDR; +unsigned IOCTL_SIOCDLIFADDR = SIOCDLIFADDR; +unsigned IOCTL_SIOCSIFADDRPREF = SIOCSIFADDRPREF; +unsigned IOCTL_SIOCGIFADDRPREF = SIOCGIFADDRPREF; +unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI; +unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI; +unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT; +unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT; +unsigned IOCTL_SIOCSIFMEDIA = SIOCSIFMEDIA; +unsigned IOCTL_SIOCGIFMEDIA = SIOCGIFMEDIA; +unsigned IOCTL_SIOCSIFGENERIC = SIOCSIFGENERIC; +unsigned IOCTL_SIOCGIFGENERIC = SIOCGIFGENERIC; +unsigned IOCTL_SIOCSIFPHYADDR = SIOCSIFPHYADDR; +unsigned IOCTL_SIOCGIFPSRCADDR = SIOCGIFPSRCADDR; +unsigned IOCTL_SIOCGIFPDSTADDR = SIOCGIFPDSTADDR; +unsigned IOCTL_SIOCDIFPHYADDR = SIOCDIFPHYADDR; +unsigned IOCTL_SIOCSLIFPHYADDR = SIOCSLIFPHYADDR; +unsigned IOCTL_SIOCGLIFPHYADDR = SIOCGLIFPHYADDR; unsigned 
IOCTL_SIOCSIFMTU = SIOCSIFMTU; -unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK; -unsigned IOCTL_SIOCSPGRP = SIOCSPGRP; -unsigned IOCTL_TIOCCONS = TIOCCONS; +unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU; +unsigned IOCTL_SIOCSDRVSPEC = SIOCSDRVSPEC; +unsigned IOCTL_SIOCGDRVSPEC = SIOCGDRVSPEC; +unsigned IOCTL_SIOCIFCREATE = SIOCIFCREATE; +unsigned IOCTL_SIOCIFDESTROY = SIOCIFDESTROY; +unsigned IOCTL_SIOCIFGCLONERS = SIOCIFGCLONERS; +unsigned IOCTL_SIOCGIFDLT = SIOCGIFDLT; +unsigned IOCTL_SIOCGIFCAP = SIOCGIFCAP; +unsigned IOCTL_SIOCSIFCAP = SIOCSIFCAP; +unsigned IOCTL_SIOCSVH = SIOCSVH; +unsigned IOCTL_SIOCGVH = SIOCGVH; +unsigned IOCTL_SIOCINITIFADDR = SIOCINITIFADDR; +unsigned IOCTL_SIOCGIFDATA = SIOCGIFDATA; +unsigned IOCTL_SIOCZIFDATA = SIOCZIFDATA; +unsigned IOCTL_SIOCGLINKSTR = SIOCGLINKSTR; +unsigned IOCTL_SIOCSLINKSTR = SIOCSLINKSTR; +unsigned IOCTL_SIOCGETHERCAP = SIOCGETHERCAP; +unsigned IOCTL_SIOCGIFINDEX = SIOCGIFINDEX; +unsigned IOCTL_SIOCSETPFSYNC = SIOCSETPFSYNC; +unsigned IOCTL_SIOCGETPFSYNC = SIOCGETPFSYNC; +unsigned IOCTL_PPS_IOC_CREATE = PPS_IOC_CREATE; +unsigned IOCTL_PPS_IOC_DESTROY = PPS_IOC_DESTROY; +unsigned IOCTL_PPS_IOC_SETPARAMS = PPS_IOC_SETPARAMS; +unsigned IOCTL_PPS_IOC_GETPARAMS = PPS_IOC_GETPARAMS; +unsigned IOCTL_PPS_IOC_GETCAP = PPS_IOC_GETCAP; +unsigned IOCTL_PPS_IOC_FETCH = PPS_IOC_FETCH; +unsigned IOCTL_PPS_IOC_KCBIND = PPS_IOC_KCBIND; unsigned IOCTL_TIOCEXCL = TIOCEXCL; -unsigned IOCTL_TIOCGETD = TIOCGETD; -unsigned IOCTL_TIOCGPGRP = TIOCGPGRP; -unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ; -unsigned IOCTL_TIOCMBIC = TIOCMBIC; -unsigned IOCTL_TIOCMBIS = TIOCMBIS; -unsigned IOCTL_TIOCMGET = TIOCMGET; -unsigned IOCTL_TIOCMSET = TIOCMSET; -unsigned IOCTL_TIOCNOTTY = TIOCNOTTY; unsigned IOCTL_TIOCNXCL = TIOCNXCL; -unsigned IOCTL_TIOCOUTQ = TIOCOUTQ; -unsigned IOCTL_TIOCPKT = TIOCPKT; -unsigned IOCTL_TIOCSCTTY = TIOCSCTTY; +unsigned IOCTL_TIOCFLUSH = TIOCFLUSH; +unsigned IOCTL_TIOCGETA = TIOCGETA; +unsigned IOCTL_TIOCSETA = TIOCSETA; +unsigned 
IOCTL_TIOCSETAW = TIOCSETAW; +unsigned IOCTL_TIOCSETAF = TIOCSETAF; +unsigned IOCTL_TIOCGETD = TIOCGETD; unsigned IOCTL_TIOCSETD = TIOCSETD; +unsigned IOCTL_TIOCGLINED = TIOCGLINED; +unsigned IOCTL_TIOCSLINED = TIOCSLINED; +unsigned IOCTL_TIOCSBRK = TIOCSBRK; +unsigned IOCTL_TIOCCBRK = TIOCCBRK; +unsigned IOCTL_TIOCSDTR = TIOCSDTR; +unsigned IOCTL_TIOCCDTR = TIOCCDTR; +unsigned IOCTL_TIOCGPGRP = TIOCGPGRP; unsigned IOCTL_TIOCSPGRP = TIOCSPGRP; +unsigned IOCTL_TIOCOUTQ = TIOCOUTQ; unsigned IOCTL_TIOCSTI = TIOCSTI; +unsigned IOCTL_TIOCNOTTY = TIOCNOTTY; +unsigned IOCTL_TIOCPKT = TIOCPKT; +unsigned IOCTL_TIOCSTOP = TIOCSTOP; +unsigned IOCTL_TIOCSTART = TIOCSTART; +unsigned IOCTL_TIOCMSET = TIOCMSET; +unsigned IOCTL_TIOCMBIS = TIOCMBIS; +unsigned IOCTL_TIOCMBIC = TIOCMBIC; +unsigned IOCTL_TIOCMGET = TIOCMGET; +unsigned IOCTL_TIOCREMOTE = TIOCREMOTE; +unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ; unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ; -unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT; -unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT; +unsigned IOCTL_TIOCUCNTL = TIOCUCNTL; +unsigned IOCTL_TIOCSTAT = TIOCSTAT; +unsigned IOCTL_TIOCGSID = TIOCGSID; +unsigned IOCTL_TIOCCONS = TIOCCONS; +unsigned IOCTL_TIOCSCTTY = TIOCSCTTY; +unsigned IOCTL_TIOCEXT = TIOCEXT; +unsigned IOCTL_TIOCSIG = TIOCSIG; +unsigned IOCTL_TIOCDRAIN = TIOCDRAIN; +unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS; +unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS; +unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP; +unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME; +unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME; +unsigned IOCTL_TIOCPTMGET = TIOCPTMGET; +unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT; +unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME; +unsigned IOCTL_TIOCSQSIZE = TIOCSQSIZE; +unsigned IOCTL_TIOCGQSIZE = TIOCGQSIZE; +unsigned IOCTL_VERIEXEC_LOAD = VERIEXEC_LOAD; +unsigned IOCTL_VERIEXEC_TABLESIZE = VERIEXEC_TABLESIZE; +unsigned IOCTL_VERIEXEC_DELETE = VERIEXEC_DELETE; +unsigned IOCTL_VERIEXEC_QUERY = VERIEXEC_QUERY; +unsigned IOCTL_VERIEXEC_DUMP = 
VERIEXEC_DUMP; +unsigned IOCTL_VERIEXEC_FLUSH = VERIEXEC_FLUSH; +unsigned IOCTL_VIDIOC_QUERYCAP = VIDIOC_QUERYCAP; +unsigned IOCTL_VIDIOC_RESERVED = VIDIOC_RESERVED; +unsigned IOCTL_VIDIOC_ENUM_FMT = VIDIOC_ENUM_FMT; +unsigned IOCTL_VIDIOC_G_FMT = VIDIOC_G_FMT; +unsigned IOCTL_VIDIOC_S_FMT = VIDIOC_S_FMT; +unsigned IOCTL_VIDIOC_REQBUFS = VIDIOC_REQBUFS; +unsigned IOCTL_VIDIOC_QUERYBUF = VIDIOC_QUERYBUF; +unsigned IOCTL_VIDIOC_G_FBUF = VIDIOC_G_FBUF; +unsigned IOCTL_VIDIOC_S_FBUF = VIDIOC_S_FBUF; +unsigned IOCTL_VIDIOC_OVERLAY = VIDIOC_OVERLAY; +unsigned IOCTL_VIDIOC_QBUF = VIDIOC_QBUF; +unsigned IOCTL_VIDIOC_DQBUF = VIDIOC_DQBUF; +unsigned IOCTL_VIDIOC_STREAMON = VIDIOC_STREAMON; +unsigned IOCTL_VIDIOC_STREAMOFF = VIDIOC_STREAMOFF; +unsigned IOCTL_VIDIOC_G_PARM = VIDIOC_G_PARM; +unsigned IOCTL_VIDIOC_S_PARM = VIDIOC_S_PARM; +unsigned IOCTL_VIDIOC_G_STD = VIDIOC_G_STD; +unsigned IOCTL_VIDIOC_S_STD = VIDIOC_S_STD; +unsigned IOCTL_VIDIOC_ENUMSTD = VIDIOC_ENUMSTD; +unsigned IOCTL_VIDIOC_ENUMINPUT = VIDIOC_ENUMINPUT; +unsigned IOCTL_VIDIOC_G_CTRL = VIDIOC_G_CTRL; +unsigned IOCTL_VIDIOC_S_CTRL = VIDIOC_S_CTRL; +unsigned IOCTL_VIDIOC_G_TUNER = VIDIOC_G_TUNER; +unsigned IOCTL_VIDIOC_S_TUNER = VIDIOC_S_TUNER; +unsigned IOCTL_VIDIOC_G_AUDIO = VIDIOC_G_AUDIO; +unsigned IOCTL_VIDIOC_S_AUDIO = VIDIOC_S_AUDIO; +unsigned IOCTL_VIDIOC_QUERYCTRL = VIDIOC_QUERYCTRL; +unsigned IOCTL_VIDIOC_QUERYMENU = VIDIOC_QUERYMENU; +unsigned IOCTL_VIDIOC_G_INPUT = VIDIOC_G_INPUT; +unsigned IOCTL_VIDIOC_S_INPUT = VIDIOC_S_INPUT; +unsigned IOCTL_VIDIOC_G_OUTPUT = VIDIOC_G_OUTPUT; +unsigned IOCTL_VIDIOC_S_OUTPUT = VIDIOC_S_OUTPUT; +unsigned IOCTL_VIDIOC_ENUMOUTPUT = VIDIOC_ENUMOUTPUT; +unsigned IOCTL_VIDIOC_G_AUDOUT = VIDIOC_G_AUDOUT; +unsigned IOCTL_VIDIOC_S_AUDOUT = VIDIOC_S_AUDOUT; +unsigned IOCTL_VIDIOC_G_MODULATOR = VIDIOC_G_MODULATOR; +unsigned IOCTL_VIDIOC_S_MODULATOR = VIDIOC_S_MODULATOR; +unsigned IOCTL_VIDIOC_G_FREQUENCY = VIDIOC_G_FREQUENCY; +unsigned IOCTL_VIDIOC_S_FREQUENCY = 
VIDIOC_S_FREQUENCY; +unsigned IOCTL_VIDIOC_CROPCAP = VIDIOC_CROPCAP; +unsigned IOCTL_VIDIOC_G_CROP = VIDIOC_G_CROP; +unsigned IOCTL_VIDIOC_S_CROP = VIDIOC_S_CROP; +unsigned IOCTL_VIDIOC_G_JPEGCOMP = VIDIOC_G_JPEGCOMP; +unsigned IOCTL_VIDIOC_S_JPEGCOMP = VIDIOC_S_JPEGCOMP; +unsigned IOCTL_VIDIOC_QUERYSTD = VIDIOC_QUERYSTD; +unsigned IOCTL_VIDIOC_TRY_FMT = VIDIOC_TRY_FMT; +unsigned IOCTL_VIDIOC_ENUMAUDIO = VIDIOC_ENUMAUDIO; +unsigned IOCTL_VIDIOC_ENUMAUDOUT = VIDIOC_ENUMAUDOUT; +unsigned IOCTL_VIDIOC_G_PRIORITY = VIDIOC_G_PRIORITY; +unsigned IOCTL_VIDIOC_S_PRIORITY = VIDIOC_S_PRIORITY; +unsigned IOCTL_VIDIOC_ENUM_FRAMESIZES = VIDIOC_ENUM_FRAMESIZES; +unsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS = VIDIOC_ENUM_FRAMEINTERVALS; +unsigned IOCTL_WDOGIOC_GMODE = WDOGIOC_GMODE; +unsigned IOCTL_WDOGIOC_SMODE = WDOGIOC_SMODE; +unsigned IOCTL_WDOGIOC_WHICH = WDOGIOC_WHICH; +unsigned IOCTL_WDOGIOC_TICKLE = WDOGIOC_TICKLE; +unsigned IOCTL_WDOGIOC_GTICKLER = WDOGIOC_GTICKLER; +unsigned IOCTL_WDOGIOC_GWDOGS = WDOGIOC_GWDOGS; +unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET; +unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC; +unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED; +unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE; +unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO; +unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE; +unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT; +unsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS; +unsigned IOCTL_SNDCTL_DSP_CHANNELS = SNDCTL_DSP_CHANNELS; +unsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS; +unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER; +unsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER; +unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST; +unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE; +unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT; +unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS; +unsigned IOCTL_SNDCTL_DSP_GETOSPACE = 
SNDCTL_DSP_GETOSPACE; +unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE; +unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK; +unsigned IOCTL_SNDCTL_DSP_GETCAPS = SNDCTL_DSP_GETCAPS; +unsigned IOCTL_SNDCTL_DSP_GETTRIGGER = SNDCTL_DSP_GETTRIGGER; +unsigned IOCTL_SNDCTL_DSP_SETTRIGGER = SNDCTL_DSP_SETTRIGGER; +unsigned IOCTL_SNDCTL_DSP_GETIPTR = SNDCTL_DSP_GETIPTR; +unsigned IOCTL_SNDCTL_DSP_GETOPTR = SNDCTL_DSP_GETOPTR; +unsigned IOCTL_SNDCTL_DSP_MAPINBUF = SNDCTL_DSP_MAPINBUF; +unsigned IOCTL_SNDCTL_DSP_MAPOUTBUF = SNDCTL_DSP_MAPOUTBUF; +unsigned IOCTL_SNDCTL_DSP_SETSYNCRO = SNDCTL_DSP_SETSYNCRO; +unsigned IOCTL_SNDCTL_DSP_SETDUPLEX = SNDCTL_DSP_SETDUPLEX; +unsigned IOCTL_SNDCTL_DSP_PROFILE = SNDCTL_DSP_PROFILE; +unsigned IOCTL_SNDCTL_DSP_GETODELAY = SNDCTL_DSP_GETODELAY; +unsigned IOCTL_SOUND_MIXER_INFO = SOUND_MIXER_INFO; +unsigned IOCTL_SOUND_OLD_MIXER_INFO = SOUND_OLD_MIXER_INFO; +unsigned IOCTL_OSS_GETVERSION = OSS_GETVERSION; +unsigned IOCTL_SNDCTL_SYSINFO = SNDCTL_SYSINFO; +unsigned IOCTL_SNDCTL_AUDIOINFO = SNDCTL_AUDIOINFO; +unsigned IOCTL_SNDCTL_ENGINEINFO = SNDCTL_ENGINEINFO; +unsigned IOCTL_SNDCTL_DSP_GETPLAYVOL = SNDCTL_DSP_GETPLAYVOL; +unsigned IOCTL_SNDCTL_DSP_SETPLAYVOL = SNDCTL_DSP_SETPLAYVOL; +unsigned IOCTL_SNDCTL_DSP_GETRECVOL = SNDCTL_DSP_GETRECVOL; +unsigned IOCTL_SNDCTL_DSP_SETRECVOL = SNDCTL_DSP_SETRECVOL; +unsigned IOCTL_SNDCTL_DSP_SKIP = SNDCTL_DSP_SKIP; +unsigned IOCTL_SNDCTL_DSP_SILENCE = SNDCTL_DSP_SILENCE; const int si_SEGV_MAPERR = SEGV_MAPERR; const int si_SEGV_ACCERR = SEGV_ACCERR; diff --git a/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h b/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h index 6823004d7b41..1718e3b1e8ea 100644 --- a/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h +++ b/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h @@ -57,33 +57,36 @@ extern unsigned ucontext_t_sz; extern unsigned struct_rlimit_sz; extern unsigned struct_utimbuf_sz; extern unsigned 
struct_timespec_sz; +extern unsigned struct_sembuf_sz; -struct __sanitizer_iocb { +extern unsigned struct_kevent_sz; + +union __sanitizer_sigval { + int sival_int; + uptr sival_ptr; +}; + +struct __sanitizer_sigevent { + int sigev_notify; + int sigev_signo; + union __sanitizer_sigval sigev_value; + uptr sigev_notify_function; + uptr sigev_notify_attributes; +}; + +struct __sanitizer_aiocb { u64 aio_offset; uptr aio_buf; - long aio_nbytes; - u32 aio_fildes; - u32 aio_lio_opcode; - long aio_reqprio; -#if SANITIZER_WORDSIZE == 64 - u8 aio_sigevent[32]; -#else - u8 aio_sigevent[20]; -#endif - u32 _state; - u32 _errno; + uptr aio_nbytes; + int aio_fildes; + int aio_lio_opcode; + int aio_reqprio; + struct __sanitizer_sigevent aio_sigevent; + int _state; + int _errno; long _retval; }; -struct __sanitizer___sysctl_args { - int *name; - int nlen; - void *oldval; - uptr *oldlenp; - void *newval; - uptr newlen; -}; - struct __sanitizer_sem_t { uptr data[5]; }; @@ -110,6 +113,19 @@ struct __sanitizer_shmid_ds { void *_shm_internal; }; +struct __sanitizer_protoent { + char *p_name; + char **p_aliases; + int p_proto; +}; + +struct __sanitizer_netent { + char *n_name; + char **n_aliases; + int n_addrtype; + u32 n_net; +}; + extern unsigned struct_msqid_ds_sz; extern unsigned struct_mq_attr_sz; extern unsigned struct_timex_sz; @@ -131,9 +147,27 @@ struct __sanitizer_ifaddrs { unsigned int ifa_addrflags; }; +typedef unsigned int __sanitizer_socklen_t; + typedef unsigned __sanitizer_pthread_key_t; typedef long long __sanitizer_time_t; +typedef int __sanitizer_suseconds_t; + +struct __sanitizer_timeval { + __sanitizer_time_t tv_sec; + __sanitizer_suseconds_t tv_usec; +}; + +struct __sanitizer_itimerval { + struct __sanitizer_timeval it_interval; + struct __sanitizer_timeval it_value; +}; + +struct __sanitizer_timespec { + __sanitizer_time_t tv_sec; + long tv_nsec; +}; struct __sanitizer_passwd { char *pw_name; @@ -189,6 +223,12 @@ struct __sanitizer_msghdr { unsigned msg_controllen; 
int msg_flags; }; + +struct __sanitizer_mmsghdr { + struct __sanitizer_msghdr msg_hdr; + unsigned int msg_len; +}; + struct __sanitizer_cmsghdr { unsigned cmsg_len; int cmsg_level; @@ -241,6 +281,22 @@ struct __sanitizer_sigaction { int sa_flags; }; +extern unsigned struct_sigaltstack_sz; + +typedef unsigned int __sanitizer_sigset13_t; + +struct __sanitizer_sigaction13 { + __sanitizer_sighandler_ptr osa_handler; + __sanitizer_sigset13_t osa_mask; + int osa_flags; +}; + +struct __sanitizer_sigaltstack { + void *ss_sp; + uptr ss_size; + int ss_flags; +}; + typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t; struct __sanitizer_kernel_sigaction_t { @@ -298,6 +354,8 @@ struct __sanitizer_pollfd { typedef unsigned __sanitizer_nfds_t; +typedef int __sanitizer_lwpid_t; + struct __sanitizer_glob_t { uptr gl_pathc; uptr gl_matchc; @@ -317,6 +375,48 @@ extern int glob_altdirfunc; extern unsigned path_max; +extern int struct_ttyent_sz; + +extern int ptrace_pt_io; +extern int ptrace_pt_lwpinfo; +extern int ptrace_pt_set_event_mask; +extern int ptrace_pt_get_event_mask; +extern int ptrace_pt_get_process_state; +extern int ptrace_pt_set_siginfo; +extern int ptrace_pt_get_siginfo; +extern int ptrace_piod_read_d; +extern int ptrace_piod_write_d; +extern int ptrace_piod_read_i; +extern int ptrace_piod_write_i; +extern int ptrace_piod_read_auxv; +extern int ptrace_pt_setregs; +extern int ptrace_pt_getregs; +extern int ptrace_pt_setfpregs; +extern int ptrace_pt_getfpregs; +extern int ptrace_pt_setdbregs; +extern int ptrace_pt_getdbregs; + +struct __sanitizer_ptrace_io_desc { + int piod_op; + void *piod_offs; + void *piod_addr; + uptr piod_len; +}; + +struct __sanitizer_ptrace_lwpinfo { + __sanitizer_lwpid_t pl_lwpid; + int pl_event; +}; + +extern unsigned struct_ptrace_ptrace_io_desc_struct_sz; +extern unsigned struct_ptrace_ptrace_lwpinfo_struct_sz; +extern unsigned struct_ptrace_ptrace_event_struct_sz; +extern unsigned struct_ptrace_ptrace_siginfo_struct_sz; + +extern unsigned 
struct_ptrace_reg_struct_sz; +extern unsigned struct_ptrace_fpreg_struct_sz; +extern unsigned struct_ptrace_dbreg_struct_sz; + struct __sanitizer_wordexp_t { uptr we_wordc; char **we_wordv; @@ -350,6 +450,16 @@ struct __sanitizer_ifconf { } ifc_ifcu; }; +struct __sanitizer_ttyent { + char *ty_name; + char *ty_getty; + char *ty_type; + int ty_status; + char *ty_window; + char *ty_comment; + char *ty_class; +}; + #define IOC_NRBITS 8 #define IOC_TYPEBITS 8 #define IOC_SIZEBITS 14 @@ -374,185 +484,1713 @@ struct __sanitizer_ifconf { #define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK) #define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK) -extern unsigned struct_ifreq_sz; -extern unsigned struct_termios_sz; -extern unsigned struct_winsize_sz; - -extern unsigned struct_arpreq_sz; +// ioctl request identifiers +extern unsigned struct_altqreq_sz; +extern unsigned struct_amr_user_ioctl_sz; +extern unsigned struct_ap_control_sz; +extern unsigned struct_apm_ctl_sz; +extern unsigned struct_apm_event_info_sz; +extern unsigned struct_apm_power_info_sz; +extern unsigned struct_atabusiodetach_args_sz; +extern unsigned struct_atabusioscan_args_sz; +extern unsigned struct_ath_diag_sz; +extern unsigned struct_atm_flowmap_sz; +extern unsigned struct_atm_pseudoioctl_sz; +extern unsigned struct_audio_buf_info_sz; +extern unsigned struct_audio_device_sz; +extern unsigned struct_audio_encoding_sz; +extern unsigned struct_audio_info_sz; +extern unsigned struct_audio_offset_sz; +extern unsigned struct_bio_locate_sz; +extern unsigned struct_bioc_alarm_sz; +extern unsigned struct_bioc_blink_sz; +extern unsigned struct_bioc_disk_sz; +extern unsigned struct_bioc_inq_sz; +extern unsigned struct_bioc_setstate_sz; +extern unsigned struct_bioc_vol_sz; +extern unsigned struct_bioc_volops_sz; +extern unsigned struct_bktr_chnlset_sz; +extern unsigned struct_bktr_remote_sz; +extern unsigned struct_blue_conf_sz; +extern unsigned struct_blue_interface_sz; +extern unsigned 
struct_blue_stats_sz; +extern unsigned struct_bpf_dltlist_sz; +extern unsigned struct_bpf_program_sz; +extern unsigned struct_bpf_stat_old_sz; +extern unsigned struct_bpf_stat_sz; +extern unsigned struct_bpf_version_sz; +extern unsigned struct_btreq_sz; +extern unsigned struct_btsco_info_sz; +extern unsigned struct_buffmem_desc_sz; +extern unsigned struct_cbq_add_class_sz; +extern unsigned struct_cbq_add_filter_sz; +extern unsigned struct_cbq_delete_class_sz; +extern unsigned struct_cbq_delete_filter_sz; +extern unsigned struct_cbq_getstats_sz; +extern unsigned struct_cbq_interface_sz; +extern unsigned struct_cbq_modify_class_sz; +extern unsigned struct_ccd_ioctl_sz; +extern unsigned struct_cdnr_add_element_sz; +extern unsigned struct_cdnr_add_filter_sz; +extern unsigned struct_cdnr_add_tbmeter_sz; +extern unsigned struct_cdnr_add_trtcm_sz; +extern unsigned struct_cdnr_add_tswtcm_sz; +extern unsigned struct_cdnr_delete_element_sz; +extern unsigned struct_cdnr_delete_filter_sz; +extern unsigned struct_cdnr_get_stats_sz; +extern unsigned struct_cdnr_interface_sz; +extern unsigned struct_cdnr_modify_tbmeter_sz; +extern unsigned struct_cdnr_modify_trtcm_sz; +extern unsigned struct_cdnr_modify_tswtcm_sz; +extern unsigned struct_cdnr_tbmeter_stats_sz; +extern unsigned struct_cdnr_tcm_stats_sz; +extern unsigned struct_cgd_ioctl_sz; +extern unsigned struct_cgd_user_sz; +extern unsigned struct_changer_element_status_request_sz; +extern unsigned struct_changer_exchange_request_sz; +extern unsigned struct_changer_move_request_sz; +extern unsigned struct_changer_params_sz; +extern unsigned struct_changer_position_request_sz; +extern unsigned struct_changer_set_voltag_request_sz; +extern unsigned struct_clockctl_adjtime_sz; +extern unsigned struct_clockctl_clock_settime_sz; +extern unsigned struct_clockctl_ntp_adjtime_sz; +extern unsigned struct_clockctl_settimeofday_sz; +extern unsigned struct_cnwistats_sz; +extern unsigned struct_cnwitrail_sz; +extern unsigned 
struct_cnwstatus_sz; +extern unsigned struct_count_info_sz; +extern unsigned struct_cpu_ucode_sz; +extern unsigned struct_cpu_ucode_version_sz; +extern unsigned struct_crypt_kop_sz; +extern unsigned struct_crypt_mkop_sz; +extern unsigned struct_crypt_mop_sz; +extern unsigned struct_crypt_op_sz; +extern unsigned struct_crypt_result_sz; +extern unsigned struct_crypt_sfop_sz; +extern unsigned struct_crypt_sgop_sz; +extern unsigned struct_cryptret_sz; +extern unsigned struct_devdetachargs_sz; +extern unsigned struct_devlistargs_sz; +extern unsigned struct_devpmargs_sz; +extern unsigned struct_devrescanargs_sz; +extern unsigned struct_disk_badsecinfo_sz; +extern unsigned struct_disk_strategy_sz; +extern unsigned struct_disklabel_sz; +extern unsigned struct_dkbad_sz; +extern unsigned struct_dkwedge_info_sz; +extern unsigned struct_dkwedge_list_sz; +extern unsigned struct_dmio_setfunc_sz; +extern unsigned struct_dmx_pes_filter_params_sz; +extern unsigned struct_dmx_sct_filter_params_sz; +extern unsigned struct_dmx_stc_sz; +extern unsigned struct_dvb_diseqc_master_cmd_sz; +extern unsigned struct_dvb_diseqc_slave_reply_sz; +extern unsigned struct_dvb_frontend_event_sz; +extern unsigned struct_dvb_frontend_info_sz; +extern unsigned struct_dvb_frontend_parameters_sz; +extern unsigned struct_eccapreq_sz; +extern unsigned struct_fbcmap_sz; +extern unsigned struct_fbcurpos_sz; +extern unsigned struct_fbcursor_sz; +extern unsigned struct_fbgattr_sz; +extern unsigned struct_fbsattr_sz; +extern unsigned struct_fbtype_sz; +extern unsigned struct_fdformat_cmd_sz; +extern unsigned struct_fdformat_parms_sz; +extern unsigned struct_fifoq_conf_sz; +extern unsigned struct_fifoq_getstats_sz; +extern unsigned struct_fifoq_interface_sz; +extern unsigned struct_format_op_sz; +extern unsigned struct_fss_get_sz; +extern unsigned struct_fss_set_sz; +extern unsigned struct_gpio_attach_sz; +extern unsigned struct_gpio_info_sz; +extern unsigned struct_gpio_req_sz; +extern unsigned 
struct_gpio_set_sz; +extern unsigned struct_hfsc_add_class_sz; +extern unsigned struct_hfsc_add_filter_sz; +extern unsigned struct_hfsc_attach_sz; +extern unsigned struct_hfsc_class_stats_sz; +extern unsigned struct_hfsc_delete_class_sz; +extern unsigned struct_hfsc_delete_filter_sz; +extern unsigned struct_hfsc_interface_sz; +extern unsigned struct_hfsc_modify_class_sz; +extern unsigned struct_hpcfb_dsp_op_sz; +extern unsigned struct_hpcfb_dspconf_sz; +extern unsigned struct_hpcfb_fbconf_sz; +extern unsigned struct_if_addrprefreq_sz; +extern unsigned struct_if_clonereq_sz; +extern unsigned struct_if_laddrreq_sz; +extern unsigned struct_ifaddr_sz; +extern unsigned struct_ifaliasreq_sz; +extern unsigned struct_ifcapreq_sz; +extern unsigned struct_ifconf_sz; +extern unsigned struct_ifdatareq_sz; +extern unsigned struct_ifdrv_sz; +extern unsigned struct_ifmediareq_sz; +extern unsigned struct_ifpppcstatsreq_sz; +extern unsigned struct_ifpppstatsreq_sz; +extern unsigned struct_ifreq_sz; +extern unsigned struct_in6_addrpolicy_sz; +extern unsigned struct_in6_ndireq_sz; +extern unsigned struct_ioc_load_unload_sz; +extern unsigned struct_ioc_patch_sz; +extern unsigned struct_ioc_play_blocks_sz; +extern unsigned struct_ioc_play_msf_sz; +extern unsigned struct_ioc_play_track_sz; +extern unsigned struct_ioc_read_subchannel_sz; +extern unsigned struct_ioc_read_toc_entry_sz; +extern unsigned struct_ioc_toc_header_sz; +extern unsigned struct_ioc_vol_sz; +extern unsigned struct_ioctl_pt_sz; +extern unsigned struct_ioppt_sz; +extern unsigned struct_iovec_sz; +extern unsigned struct_ipfobj_sz; +extern unsigned struct_irda_params_sz; +extern unsigned struct_isp_fc_device_sz; +extern unsigned struct_isp_fc_tsk_mgmt_sz; +extern unsigned struct_isp_hba_device_sz; +extern unsigned struct_isv_cmd_sz; +extern unsigned struct_jobs_add_class_sz; +extern unsigned struct_jobs_add_filter_sz; +extern unsigned struct_jobs_attach_sz; +extern unsigned struct_jobs_class_stats_sz; +extern unsigned 
struct_jobs_delete_class_sz; +extern unsigned struct_jobs_delete_filter_sz; +extern unsigned struct_jobs_interface_sz; +extern unsigned struct_jobs_modify_class_sz; +extern unsigned struct_kbentry_sz; +extern unsigned struct_kfilter_mapping_sz; +extern unsigned struct_kiockeymap_sz; +extern unsigned struct_ksyms_gsymbol_sz; +extern unsigned struct_ksyms_gvalue_sz; +extern unsigned struct_ksyms_ogsymbol_sz; +extern unsigned struct_kttcp_io_args_sz; +extern unsigned struct_ltchars_sz; +extern unsigned struct_lua_create_sz; +extern unsigned struct_lua_info_sz; +extern unsigned struct_lua_load_sz; +extern unsigned struct_lua_require_sz; +extern unsigned struct_mbpp_param_sz; +extern unsigned struct_md_conf_sz; +extern unsigned struct_meteor_capframe_sz; +extern unsigned struct_meteor_counts_sz; +extern unsigned struct_meteor_geomet_sz; +extern unsigned struct_meteor_pixfmt_sz; +extern unsigned struct_meteor_video_sz; +extern unsigned struct_mlx_cinfo_sz; +extern unsigned struct_mlx_pause_sz; +extern unsigned struct_mlx_rebuild_request_sz; +extern unsigned struct_mlx_rebuild_status_sz; +extern unsigned struct_mlx_usercommand_sz; +extern unsigned struct_mly_user_command_sz; +extern unsigned struct_mly_user_health_sz; extern unsigned struct_mtget_sz; extern unsigned struct_mtop_sz; -extern unsigned struct_rtentry_sz; -extern unsigned struct_sbi_instrument_sz; +extern unsigned struct_npf_ioctl_table_sz; +extern unsigned struct_npioctl_sz; +extern unsigned struct_nvme_pt_command_sz; +extern unsigned struct_ochanger_element_status_request_sz; +extern unsigned struct_ofiocdesc_sz; +extern unsigned struct_okiockey_sz; +extern unsigned struct_ortentry_sz; +extern unsigned struct_oscsi_addr_sz; +extern unsigned struct_oss_audioinfo_sz; +extern unsigned struct_oss_sysinfo_sz; +extern unsigned struct_pciio_bdf_cfgreg_sz; +extern unsigned struct_pciio_businfo_sz; +extern unsigned struct_pciio_cfgreg_sz; +extern unsigned struct_pciio_drvname_sz; +extern unsigned 
struct_pciio_drvnameonbus_sz; +extern unsigned struct_pcvtid_sz; +extern unsigned struct_pf_osfp_ioctl_sz; +extern unsigned struct_pf_status_sz; +extern unsigned struct_pfioc_altq_sz; +extern unsigned struct_pfioc_if_sz; +extern unsigned struct_pfioc_iface_sz; +extern unsigned struct_pfioc_limit_sz; +extern unsigned struct_pfioc_natlook_sz; +extern unsigned struct_pfioc_pooladdr_sz; +extern unsigned struct_pfioc_qstats_sz; +extern unsigned struct_pfioc_rule_sz; +extern unsigned struct_pfioc_ruleset_sz; +extern unsigned struct_pfioc_src_node_kill_sz; +extern unsigned struct_pfioc_src_nodes_sz; +extern unsigned struct_pfioc_state_kill_sz; +extern unsigned struct_pfioc_state_sz; +extern unsigned struct_pfioc_states_sz; +extern unsigned struct_pfioc_table_sz; +extern unsigned struct_pfioc_tm_sz; +extern unsigned struct_pfioc_trans_sz; +extern unsigned struct_plistref_sz; +extern unsigned struct_power_type_sz; +extern unsigned struct_ppp_idle_sz; +extern unsigned struct_ppp_option_data_sz; +extern unsigned struct_ppp_rawin_sz; +extern unsigned struct_pppoeconnectionstate_sz; +extern unsigned struct_pppoediscparms_sz; +extern unsigned struct_priq_add_class_sz; +extern unsigned struct_priq_add_filter_sz; +extern unsigned struct_priq_class_stats_sz; +extern unsigned struct_priq_delete_class_sz; +extern unsigned struct_priq_delete_filter_sz; +extern unsigned struct_priq_interface_sz; +extern unsigned struct_priq_modify_class_sz; +extern unsigned struct_ptmget_sz; +extern unsigned struct_pvctxreq_sz; +extern unsigned struct_radio_info_sz; +extern unsigned struct_red_conf_sz; +extern unsigned struct_red_interface_sz; +extern unsigned struct_red_stats_sz; +extern unsigned struct_redparams_sz; +extern unsigned struct_rf_pmparams_sz; +extern unsigned struct_rf_pmstat_sz; +extern unsigned struct_rf_recon_req_sz; +extern unsigned struct_rio_conf_sz; +extern unsigned struct_rio_interface_sz; +extern unsigned struct_rio_stats_sz; +extern unsigned struct_satlink_id_sz; +extern 
unsigned struct_scan_io_sz; +extern unsigned struct_scbusaccel_args_sz; +extern unsigned struct_scbusiodetach_args_sz; +extern unsigned struct_scbusioscan_args_sz; +extern unsigned struct_scsi_addr_sz; extern unsigned struct_seq_event_rec_sz; -extern unsigned struct_synth_info_sz; -extern unsigned struct_vt_mode_sz; -extern unsigned struct_audio_buf_info_sz; -extern unsigned struct_ppp_stats_sz; +extern unsigned struct_session_op_sz; +extern unsigned struct_sgttyb_sz; extern unsigned struct_sioc_sg_req_sz; extern unsigned struct_sioc_vif_req_sz; +extern unsigned struct_smbioc_flags_sz; +extern unsigned struct_smbioc_lookup_sz; +extern unsigned struct_smbioc_oshare_sz; +extern unsigned struct_smbioc_ossn_sz; +extern unsigned struct_smbioc_rq_sz; +extern unsigned struct_smbioc_rw_sz; +extern unsigned struct_spppauthcfg_sz; +extern unsigned struct_spppauthfailuresettings_sz; +extern unsigned struct_spppauthfailurestats_sz; +extern unsigned struct_spppdnsaddrs_sz; +extern unsigned struct_spppdnssettings_sz; +extern unsigned struct_spppidletimeout_sz; +extern unsigned struct_spppkeepalivesettings_sz; +extern unsigned struct_sppplcpcfg_sz; +extern unsigned struct_spppstatus_sz; +extern unsigned struct_spppstatusncp_sz; +extern unsigned struct_srt_rt_sz; +extern unsigned struct_stic_xinfo_sz; +extern unsigned struct_sun_dkctlr_sz; +extern unsigned struct_sun_dkgeom_sz; +extern unsigned struct_sun_dkpart_sz; +extern unsigned struct_synth_info_sz; +extern unsigned struct_tbrreq_sz; +extern unsigned struct_tchars_sz; +extern unsigned struct_termios_sz; +extern unsigned struct_timeval_sz; +extern unsigned struct_twe_drivecommand_sz; +extern unsigned struct_twe_paramcommand_sz; +extern unsigned struct_twe_usercommand_sz; +extern unsigned struct_ukyopon_identify_sz; +extern unsigned struct_urio_command_sz; +extern unsigned struct_usb_alt_interface_sz; +extern unsigned struct_usb_bulk_ra_wb_opt_sz; +extern unsigned struct_usb_config_desc_sz; +extern unsigned 
struct_usb_ctl_report_desc_sz; +extern unsigned struct_usb_ctl_report_sz; +extern unsigned struct_usb_ctl_request_sz; +extern unsigned struct_usb_device_info_old_sz; +extern unsigned struct_usb_device_info_sz; +extern unsigned struct_usb_device_stats_sz; +extern unsigned struct_usb_endpoint_desc_sz; +extern unsigned struct_usb_full_desc_sz; +extern unsigned struct_usb_interface_desc_sz; +extern unsigned struct_usb_string_desc_sz; +extern unsigned struct_utoppy_readfile_sz; +extern unsigned struct_utoppy_rename_sz; +extern unsigned struct_utoppy_stats_sz; +extern unsigned struct_utoppy_writefile_sz; +extern unsigned struct_v4l2_audio_sz; +extern unsigned struct_v4l2_audioout_sz; +extern unsigned struct_v4l2_buffer_sz; +extern unsigned struct_v4l2_capability_sz; +extern unsigned struct_v4l2_control_sz; +extern unsigned struct_v4l2_crop_sz; +extern unsigned struct_v4l2_cropcap_sz; +extern unsigned struct_v4l2_fmtdesc_sz; +extern unsigned struct_v4l2_format_sz; +extern unsigned struct_v4l2_framebuffer_sz; +extern unsigned struct_v4l2_frequency_sz; +extern unsigned struct_v4l2_frmivalenum_sz; +extern unsigned struct_v4l2_frmsizeenum_sz; +extern unsigned struct_v4l2_input_sz; +extern unsigned struct_v4l2_jpegcompression_sz; +extern unsigned struct_v4l2_modulator_sz; +extern unsigned struct_v4l2_output_sz; +extern unsigned struct_v4l2_queryctrl_sz; +extern unsigned struct_v4l2_querymenu_sz; +extern unsigned struct_v4l2_requestbuffers_sz; +extern unsigned struct_v4l2_standard_sz; +extern unsigned struct_v4l2_streamparm_sz; +extern unsigned struct_v4l2_tuner_sz; +extern unsigned struct_vnd_ioctl_sz; +extern unsigned struct_vnd_user_sz; +extern unsigned struct_vt_stat_sz; +extern unsigned struct_wdog_conf_sz; +extern unsigned struct_wdog_mode_sz; +extern unsigned struct_wfq_conf_sz; +extern unsigned struct_wfq_getqid_sz; +extern unsigned struct_wfq_getstats_sz; +extern unsigned struct_wfq_interface_sz; +extern unsigned struct_wfq_setweight_sz; +extern unsigned 
struct_winsize_sz; +extern unsigned struct_wscons_event_sz; +extern unsigned struct_wsdisplay_addscreendata_sz; +extern unsigned struct_wsdisplay_char_sz; +extern unsigned struct_wsdisplay_cmap_sz; +extern unsigned struct_wsdisplay_curpos_sz; +extern unsigned struct_wsdisplay_cursor_sz; +extern unsigned struct_wsdisplay_delscreendata_sz; +extern unsigned struct_wsdisplay_fbinfo_sz; +extern unsigned struct_wsdisplay_font_sz; +extern unsigned struct_wsdisplay_kbddata_sz; +extern unsigned struct_wsdisplay_msgattrs_sz; +extern unsigned struct_wsdisplay_param_sz; +extern unsigned struct_wsdisplay_scroll_data_sz; +extern unsigned struct_wsdisplay_usefontdata_sz; +extern unsigned struct_wsdisplayio_blit_sz; +extern unsigned struct_wsdisplayio_bus_id_sz; +extern unsigned struct_wsdisplayio_edid_info_sz; +extern unsigned struct_wsdisplayio_fbinfo_sz; +extern unsigned struct_wskbd_bell_data_sz; +extern unsigned struct_wskbd_keyrepeat_data_sz; +extern unsigned struct_wskbd_map_data_sz; +extern unsigned struct_wskbd_scroll_data_sz; +extern unsigned struct_wsmouse_calibcoords_sz; +extern unsigned struct_wsmouse_id_sz; +extern unsigned struct_wsmouse_repeat_sz; +extern unsigned struct_wsmux_device_list_sz; +extern unsigned struct_wsmux_device_sz; +extern unsigned struct_xd_iocmd_sz; + +extern unsigned struct_scsireq_sz; +extern unsigned struct_tone_sz; +extern unsigned union_twe_statrequest_sz; +extern unsigned struct_usb_device_descriptor_sz; +extern unsigned struct_vt_mode_sz; +extern unsigned struct__old_mixer_info_sz; +extern unsigned struct__agp_allocate_sz; +extern unsigned struct__agp_bind_sz; +extern unsigned struct__agp_info_sz; +extern unsigned struct__agp_setup_sz; +extern unsigned struct__agp_unbind_sz; +extern unsigned struct_atareq_sz; +extern unsigned struct_cpustate_sz; +extern unsigned struct_dmx_caps_sz; +extern unsigned enum_dmx_source_sz; +extern unsigned union_dvd_authinfo_sz; +extern unsigned union_dvd_struct_sz; +extern unsigned enum_v4l2_priority_sz; 
+extern unsigned struct_envsys_basic_info_sz; +extern unsigned struct_envsys_tre_data_sz; +extern unsigned enum_fe_sec_mini_cmd_sz; +extern unsigned enum_fe_sec_tone_mode_sz; +extern unsigned enum_fe_sec_voltage_sz; +extern unsigned enum_fe_status_sz; +extern unsigned struct_gdt_ctrt_sz; +extern unsigned struct_gdt_event_sz; +extern unsigned struct_gdt_osv_sz; +extern unsigned struct_gdt_rescan_sz; +extern unsigned struct_gdt_statist_sz; +extern unsigned struct_gdt_ucmd_sz; +extern unsigned struct_iscsi_conn_status_parameters_sz; +extern unsigned struct_iscsi_get_version_parameters_sz; +extern unsigned struct_iscsi_iocommand_parameters_sz; +extern unsigned struct_iscsi_login_parameters_sz; +extern unsigned struct_iscsi_logout_parameters_sz; +extern unsigned struct_iscsi_register_event_parameters_sz; +extern unsigned struct_iscsi_remove_parameters_sz; +extern unsigned struct_iscsi_send_targets_parameters_sz; +extern unsigned struct_iscsi_set_node_name_parameters_sz; +extern unsigned struct_iscsi_wait_event_parameters_sz; +extern unsigned struct_isp_stats_sz; +extern unsigned struct_lsenable_sz; +extern unsigned struct_lsdisable_sz; +extern unsigned struct_mixer_ctrl_sz; +extern unsigned struct_mixer_devinfo_sz; +extern unsigned struct_mpu_command_rec_sz; +extern unsigned struct_rndstat_sz; +extern unsigned struct_rndstat_name_sz; +extern unsigned struct_rndctl_sz; +extern unsigned struct_rnddata_sz; +extern unsigned struct_rndpoolstat_sz; +extern unsigned struct_rndstat_est_sz; +extern unsigned struct_rndstat_est_name_sz; +extern unsigned struct_pps_params_sz; +extern unsigned struct_pps_info_sz; +extern unsigned struct_mixer_info_sz; +extern unsigned struct_RF_SparetWait_sz; +extern unsigned struct_RF_ComponentLabel_sz; +extern unsigned struct_RF_SingleComponent_sz; +extern unsigned struct_RF_ProgressInfo_sz; -// ioctl request identifiers // A special value to mark ioctls that are not present on the target platform, // when it can not be determined without 
including any system headers. extern const unsigned IOCTL_NOT_PRESENT; -extern unsigned IOCTL_FIOASYNC; + +extern unsigned IOCTL_AFM_ADDFMAP; +extern unsigned IOCTL_AFM_DELFMAP; +extern unsigned IOCTL_AFM_CLEANFMAP; +extern unsigned IOCTL_AFM_GETFMAP; +extern unsigned IOCTL_ALTQGTYPE; +extern unsigned IOCTL_ALTQTBRSET; +extern unsigned IOCTL_ALTQTBRGET; +extern unsigned IOCTL_BLUE_IF_ATTACH; +extern unsigned IOCTL_BLUE_IF_DETACH; +extern unsigned IOCTL_BLUE_ENABLE; +extern unsigned IOCTL_BLUE_DISABLE; +extern unsigned IOCTL_BLUE_CONFIG; +extern unsigned IOCTL_BLUE_GETSTATS; +extern unsigned IOCTL_CBQ_IF_ATTACH; +extern unsigned IOCTL_CBQ_IF_DETACH; +extern unsigned IOCTL_CBQ_ENABLE; +extern unsigned IOCTL_CBQ_DISABLE; +extern unsigned IOCTL_CBQ_CLEAR_HIERARCHY; +extern unsigned IOCTL_CBQ_ADD_CLASS; +extern unsigned IOCTL_CBQ_DEL_CLASS; +extern unsigned IOCTL_CBQ_MODIFY_CLASS; +extern unsigned IOCTL_CBQ_ADD_FILTER; +extern unsigned IOCTL_CBQ_DEL_FILTER; +extern unsigned IOCTL_CBQ_GETSTATS; +extern unsigned IOCTL_CDNR_IF_ATTACH; +extern unsigned IOCTL_CDNR_IF_DETACH; +extern unsigned IOCTL_CDNR_ENABLE; +extern unsigned IOCTL_CDNR_DISABLE; +extern unsigned IOCTL_CDNR_ADD_FILTER; +extern unsigned IOCTL_CDNR_DEL_FILTER; +extern unsigned IOCTL_CDNR_GETSTATS; +extern unsigned IOCTL_CDNR_ADD_ELEM; +extern unsigned IOCTL_CDNR_DEL_ELEM; +extern unsigned IOCTL_CDNR_ADD_TBM; +extern unsigned IOCTL_CDNR_MOD_TBM; +extern unsigned IOCTL_CDNR_TBM_STATS; +extern unsigned IOCTL_CDNR_ADD_TCM; +extern unsigned IOCTL_CDNR_MOD_TCM; +extern unsigned IOCTL_CDNR_TCM_STATS; +extern unsigned IOCTL_CDNR_ADD_TSW; +extern unsigned IOCTL_CDNR_MOD_TSW; +extern unsigned IOCTL_FIFOQ_IF_ATTACH; +extern unsigned IOCTL_FIFOQ_IF_DETACH; +extern unsigned IOCTL_FIFOQ_ENABLE; +extern unsigned IOCTL_FIFOQ_DISABLE; +extern unsigned IOCTL_FIFOQ_CONFIG; +extern unsigned IOCTL_FIFOQ_GETSTATS; +extern unsigned IOCTL_HFSC_IF_ATTACH; +extern unsigned IOCTL_HFSC_IF_DETACH; +extern unsigned IOCTL_HFSC_ENABLE; 
+extern unsigned IOCTL_HFSC_DISABLE; +extern unsigned IOCTL_HFSC_CLEAR_HIERARCHY; +extern unsigned IOCTL_HFSC_ADD_CLASS; +extern unsigned IOCTL_HFSC_DEL_CLASS; +extern unsigned IOCTL_HFSC_MOD_CLASS; +extern unsigned IOCTL_HFSC_ADD_FILTER; +extern unsigned IOCTL_HFSC_DEL_FILTER; +extern unsigned IOCTL_HFSC_GETSTATS; +extern unsigned IOCTL_JOBS_IF_ATTACH; +extern unsigned IOCTL_JOBS_IF_DETACH; +extern unsigned IOCTL_JOBS_ENABLE; +extern unsigned IOCTL_JOBS_DISABLE; +extern unsigned IOCTL_JOBS_CLEAR; +extern unsigned IOCTL_JOBS_ADD_CLASS; +extern unsigned IOCTL_JOBS_DEL_CLASS; +extern unsigned IOCTL_JOBS_MOD_CLASS; +extern unsigned IOCTL_JOBS_ADD_FILTER; +extern unsigned IOCTL_JOBS_DEL_FILTER; +extern unsigned IOCTL_JOBS_GETSTATS; +extern unsigned IOCTL_PRIQ_IF_ATTACH; +extern unsigned IOCTL_PRIQ_IF_DETACH; +extern unsigned IOCTL_PRIQ_ENABLE; +extern unsigned IOCTL_PRIQ_DISABLE; +extern unsigned IOCTL_PRIQ_CLEAR; +extern unsigned IOCTL_PRIQ_ADD_CLASS; +extern unsigned IOCTL_PRIQ_DEL_CLASS; +extern unsigned IOCTL_PRIQ_MOD_CLASS; +extern unsigned IOCTL_PRIQ_ADD_FILTER; +extern unsigned IOCTL_PRIQ_DEL_FILTER; +extern unsigned IOCTL_PRIQ_GETSTATS; +extern unsigned IOCTL_RED_IF_ATTACH; +extern unsigned IOCTL_RED_IF_DETACH; +extern unsigned IOCTL_RED_ENABLE; +extern unsigned IOCTL_RED_DISABLE; +extern unsigned IOCTL_RED_CONFIG; +extern unsigned IOCTL_RED_GETSTATS; +extern unsigned IOCTL_RED_SETDEFAULTS; +extern unsigned IOCTL_RIO_IF_ATTACH; +extern unsigned IOCTL_RIO_IF_DETACH; +extern unsigned IOCTL_RIO_ENABLE; +extern unsigned IOCTL_RIO_DISABLE; +extern unsigned IOCTL_RIO_CONFIG; +extern unsigned IOCTL_RIO_GETSTATS; +extern unsigned IOCTL_RIO_SETDEFAULTS; +extern unsigned IOCTL_WFQ_IF_ATTACH; +extern unsigned IOCTL_WFQ_IF_DETACH; +extern unsigned IOCTL_WFQ_ENABLE; +extern unsigned IOCTL_WFQ_DISABLE; +extern unsigned IOCTL_WFQ_CONFIG; +extern unsigned IOCTL_WFQ_GET_STATS; +extern unsigned IOCTL_WFQ_GET_QID; +extern unsigned IOCTL_WFQ_SET_WEIGHT; +extern unsigned 
IOCTL_CRIOGET; +extern unsigned IOCTL_CIOCFSESSION; +extern unsigned IOCTL_CIOCKEY; +extern unsigned IOCTL_CIOCNFKEYM; +extern unsigned IOCTL_CIOCNFSESSION; +extern unsigned IOCTL_CIOCNCRYPTRETM; +extern unsigned IOCTL_CIOCNCRYPTRET; +extern unsigned IOCTL_CIOCGSESSION; +extern unsigned IOCTL_CIOCNGSESSION; +extern unsigned IOCTL_CIOCCRYPT; +extern unsigned IOCTL_CIOCNCRYPTM; +extern unsigned IOCTL_CIOCASYMFEAT; +extern unsigned IOCTL_APM_IOC_REJECT; +extern unsigned IOCTL_APM_IOC_STANDBY; +extern unsigned IOCTL_APM_IOC_SUSPEND; +extern unsigned IOCTL_OAPM_IOC_GETPOWER; +extern unsigned IOCTL_APM_IOC_GETPOWER; +extern unsigned IOCTL_APM_IOC_NEXTEVENT; +extern unsigned IOCTL_APM_IOC_DEV_CTL; +extern unsigned IOCTL_NETBSD_DM_IOCTL; +extern unsigned IOCTL_DMIO_SETFUNC; +extern unsigned IOCTL_DMX_START; +extern unsigned IOCTL_DMX_STOP; +extern unsigned IOCTL_DMX_SET_FILTER; +extern unsigned IOCTL_DMX_SET_PES_FILTER; +extern unsigned IOCTL_DMX_SET_BUFFER_SIZE; +extern unsigned IOCTL_DMX_GET_STC; +extern unsigned IOCTL_DMX_ADD_PID; +extern unsigned IOCTL_DMX_REMOVE_PID; +extern unsigned IOCTL_DMX_GET_CAPS; +extern unsigned IOCTL_DMX_SET_SOURCE; +extern unsigned IOCTL_FE_READ_STATUS; +extern unsigned IOCTL_FE_READ_BER; +extern unsigned IOCTL_FE_READ_SNR; +extern unsigned IOCTL_FE_READ_SIGNAL_STRENGTH; +extern unsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS; +extern unsigned IOCTL_FE_SET_FRONTEND; +extern unsigned IOCTL_FE_GET_FRONTEND; +extern unsigned IOCTL_FE_GET_EVENT; +extern unsigned IOCTL_FE_GET_INFO; +extern unsigned IOCTL_FE_DISEQC_RESET_OVERLOAD; +extern unsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD; +extern unsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY; +extern unsigned IOCTL_FE_DISEQC_SEND_BURST; +extern unsigned IOCTL_FE_SET_TONE; +extern unsigned IOCTL_FE_SET_VOLTAGE; +extern unsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE; +extern unsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE; +extern unsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD; +extern unsigned IOCTL_FILEMON_SET_FD; +extern 
unsigned IOCTL_FILEMON_SET_PID; +extern unsigned IOCTL_HDAUDIO_FGRP_INFO; +extern unsigned IOCTL_HDAUDIO_FGRP_GETCONFIG; +extern unsigned IOCTL_HDAUDIO_FGRP_SETCONFIG; +extern unsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO; +extern unsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO; +extern unsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO; +extern unsigned IOCTL_HDAUDIO_AFG_CODEC_INFO; +extern unsigned IOCTL_CEC_GET_PHYS_ADDR; +extern unsigned IOCTL_CEC_GET_LOG_ADDRS; +extern unsigned IOCTL_CEC_SET_LOG_ADDRS; +extern unsigned IOCTL_CEC_GET_VENDOR_ID; +extern unsigned IOCTL_HPCFBIO_GCONF; +extern unsigned IOCTL_HPCFBIO_SCONF; +extern unsigned IOCTL_HPCFBIO_GDSPCONF; +extern unsigned IOCTL_HPCFBIO_SDSPCONF; +extern unsigned IOCTL_HPCFBIO_GOP; +extern unsigned IOCTL_HPCFBIO_SOP; +extern unsigned IOCTL_IOPIOCPT; +extern unsigned IOCTL_IOPIOCGLCT; +extern unsigned IOCTL_IOPIOCGSTATUS; +extern unsigned IOCTL_IOPIOCRECONFIG; +extern unsigned IOCTL_IOPIOCGTIDMAP; +extern unsigned IOCTL_SIOCGATHSTATS; +extern unsigned IOCTL_SIOCGATHDIAG; +extern unsigned IOCTL_METEORCAPTUR; +extern unsigned IOCTL_METEORCAPFRM; +extern unsigned IOCTL_METEORSETGEO; +extern unsigned IOCTL_METEORGETGEO; +extern unsigned IOCTL_METEORSTATUS; +extern unsigned IOCTL_METEORSHUE; +extern unsigned IOCTL_METEORGHUE; +extern unsigned IOCTL_METEORSFMT; +extern unsigned IOCTL_METEORGFMT; +extern unsigned IOCTL_METEORSINPUT; +extern unsigned IOCTL_METEORGINPUT; +extern unsigned IOCTL_METEORSCHCV; +extern unsigned IOCTL_METEORGCHCV; +extern unsigned IOCTL_METEORSCOUNT; +extern unsigned IOCTL_METEORGCOUNT; +extern unsigned IOCTL_METEORSFPS; +extern unsigned IOCTL_METEORGFPS; +extern unsigned IOCTL_METEORSSIGNAL; +extern unsigned IOCTL_METEORGSIGNAL; +extern unsigned IOCTL_METEORSVIDEO; +extern unsigned IOCTL_METEORGVIDEO; +extern unsigned IOCTL_METEORSBRIG; +extern unsigned IOCTL_METEORGBRIG; +extern unsigned IOCTL_METEORSCSAT; +extern unsigned IOCTL_METEORGCSAT; +extern unsigned IOCTL_METEORSCONT; +extern unsigned IOCTL_METEORGCONT; 
+extern unsigned IOCTL_METEORSHWS; +extern unsigned IOCTL_METEORGHWS; +extern unsigned IOCTL_METEORSVWS; +extern unsigned IOCTL_METEORGVWS; +extern unsigned IOCTL_METEORSTS; +extern unsigned IOCTL_METEORGTS; +extern unsigned IOCTL_TVTUNER_SETCHNL; +extern unsigned IOCTL_TVTUNER_GETCHNL; +extern unsigned IOCTL_TVTUNER_SETTYPE; +extern unsigned IOCTL_TVTUNER_GETTYPE; +extern unsigned IOCTL_TVTUNER_GETSTATUS; +extern unsigned IOCTL_TVTUNER_SETFREQ; +extern unsigned IOCTL_TVTUNER_GETFREQ; +extern unsigned IOCTL_TVTUNER_SETAFC; +extern unsigned IOCTL_TVTUNER_GETAFC; +extern unsigned IOCTL_RADIO_SETMODE; +extern unsigned IOCTL_RADIO_GETMODE; +extern unsigned IOCTL_RADIO_SETFREQ; +extern unsigned IOCTL_RADIO_GETFREQ; +extern unsigned IOCTL_METEORSACTPIXFMT; +extern unsigned IOCTL_METEORGACTPIXFMT; +extern unsigned IOCTL_METEORGSUPPIXFMT; +extern unsigned IOCTL_TVTUNER_GETCHNLSET; +extern unsigned IOCTL_REMOTE_GETKEY; +extern unsigned IOCTL_GDT_IOCTL_GENERAL; +extern unsigned IOCTL_GDT_IOCTL_DRVERS; +extern unsigned IOCTL_GDT_IOCTL_CTRTYPE; +extern unsigned IOCTL_GDT_IOCTL_OSVERS; +extern unsigned IOCTL_GDT_IOCTL_CTRCNT; +extern unsigned IOCTL_GDT_IOCTL_EVENT; +extern unsigned IOCTL_GDT_IOCTL_STATIST; +extern unsigned IOCTL_GDT_IOCTL_RESCAN; +extern unsigned IOCTL_ISP_SDBLEV; +extern unsigned IOCTL_ISP_RESETHBA; +extern unsigned IOCTL_ISP_RESCAN; +extern unsigned IOCTL_ISP_SETROLE; +extern unsigned IOCTL_ISP_GETROLE; +extern unsigned IOCTL_ISP_GET_STATS; +extern unsigned IOCTL_ISP_CLR_STATS; +extern unsigned IOCTL_ISP_FC_LIP; +extern unsigned IOCTL_ISP_FC_GETDINFO; +extern unsigned IOCTL_ISP_GET_FW_CRASH_DUMP; +extern unsigned IOCTL_ISP_FORCE_CRASH_DUMP; +extern unsigned IOCTL_ISP_FC_GETHINFO; +extern unsigned IOCTL_ISP_TSK_MGMT; +extern unsigned IOCTL_ISP_FC_GETDLIST; +extern unsigned IOCTL_MLXD_STATUS; +extern unsigned IOCTL_MLXD_CHECKASYNC; +extern unsigned IOCTL_MLXD_DETACH; +extern unsigned IOCTL_MLX_RESCAN_DRIVES; +extern unsigned IOCTL_MLX_PAUSE_CHANNEL; +extern 
unsigned IOCTL_MLX_COMMAND; +extern unsigned IOCTL_MLX_REBUILDASYNC; +extern unsigned IOCTL_MLX_REBUILDSTAT; +extern unsigned IOCTL_MLX_GET_SYSDRIVE; +extern unsigned IOCTL_MLX_GET_CINFO; +extern unsigned IOCTL_NVME_PASSTHROUGH_CMD; +extern unsigned IOCTL_IRDA_RESET_PARAMS; +extern unsigned IOCTL_IRDA_SET_PARAMS; +extern unsigned IOCTL_IRDA_GET_SPEEDMASK; +extern unsigned IOCTL_IRDA_GET_TURNAROUNDMASK; +extern unsigned IOCTL_IRFRAMETTY_GET_DEVICE; +extern unsigned IOCTL_IRFRAMETTY_GET_DONGLE; +extern unsigned IOCTL_IRFRAMETTY_SET_DONGLE; +extern unsigned IOCTL_SATIORESET; +extern unsigned IOCTL_SATIOGID; +extern unsigned IOCTL_SATIOSBUFSIZE; +extern unsigned IOCTL_ISV_CMD; +extern unsigned IOCTL_WTQICMD; +extern unsigned IOCTL_ISCSI_GET_VERSION; +extern unsigned IOCTL_ISCSI_LOGIN; +extern unsigned IOCTL_ISCSI_LOGOUT; +extern unsigned IOCTL_ISCSI_ADD_CONNECTION; +extern unsigned IOCTL_ISCSI_RESTORE_CONNECTION; +extern unsigned IOCTL_ISCSI_REMOVE_CONNECTION; +extern unsigned IOCTL_ISCSI_CONNECTION_STATUS; +extern unsigned IOCTL_ISCSI_SEND_TARGETS; +extern unsigned IOCTL_ISCSI_SET_NODE_NAME; +extern unsigned IOCTL_ISCSI_IO_COMMAND; +extern unsigned IOCTL_ISCSI_REGISTER_EVENT; +extern unsigned IOCTL_ISCSI_DEREGISTER_EVENT; +extern unsigned IOCTL_ISCSI_WAIT_EVENT; +extern unsigned IOCTL_ISCSI_POLL_EVENT; +extern unsigned IOCTL_OFIOCGET; +extern unsigned IOCTL_OFIOCSET; +extern unsigned IOCTL_OFIOCNEXTPROP; +extern unsigned IOCTL_OFIOCGETOPTNODE; +extern unsigned IOCTL_OFIOCGETNEXT; +extern unsigned IOCTL_OFIOCGETCHILD; +extern unsigned IOCTL_OFIOCFINDDEVICE; +extern unsigned IOCTL_AMR_IO_VERSION; +extern unsigned IOCTL_AMR_IO_COMMAND; +extern unsigned IOCTL_MLYIO_COMMAND; +extern unsigned IOCTL_MLYIO_HEALTH; +extern unsigned IOCTL_PCI_IOC_CFGREAD; +extern unsigned IOCTL_PCI_IOC_CFGWRITE; +extern unsigned IOCTL_PCI_IOC_BDF_CFGREAD; +extern unsigned IOCTL_PCI_IOC_BDF_CFGWRITE; +extern unsigned IOCTL_PCI_IOC_BUSINFO; +extern unsigned IOCTL_PCI_IOC_DRVNAME; +extern unsigned 
IOCTL_PCI_IOC_DRVNAMEONBUS; +extern unsigned IOCTL_TWEIO_COMMAND; +extern unsigned IOCTL_TWEIO_STATS; +extern unsigned IOCTL_TWEIO_AEN_POLL; +extern unsigned IOCTL_TWEIO_AEN_WAIT; +extern unsigned IOCTL_TWEIO_SET_PARAM; +extern unsigned IOCTL_TWEIO_GET_PARAM; +extern unsigned IOCTL_TWEIO_RESET; +extern unsigned IOCTL_TWEIO_ADD_UNIT; +extern unsigned IOCTL_TWEIO_DEL_UNIT; +extern unsigned IOCTL_SIOCSCNWDOMAIN; +extern unsigned IOCTL_SIOCGCNWDOMAIN; +extern unsigned IOCTL_SIOCSCNWKEY; +extern unsigned IOCTL_SIOCGCNWSTATUS; +extern unsigned IOCTL_SIOCGCNWSTATS; +extern unsigned IOCTL_SIOCGCNWTRAIL; +extern unsigned IOCTL_SIOCGRAYSIGLEV; +extern unsigned IOCTL_RAIDFRAME_SHUTDOWN; +extern unsigned IOCTL_RAIDFRAME_TUR; +extern unsigned IOCTL_RAIDFRAME_FAIL_DISK; +extern unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS; +extern unsigned IOCTL_RAIDFRAME_REWRITEPARITY; +extern unsigned IOCTL_RAIDFRAME_COPYBACK; +extern unsigned IOCTL_RAIDFRAME_SPARET_WAIT; +extern unsigned IOCTL_RAIDFRAME_SEND_SPARET; +extern unsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT; +extern unsigned IOCTL_RAIDFRAME_START_ATRACE; +extern unsigned IOCTL_RAIDFRAME_STOP_ATRACE; +extern unsigned IOCTL_RAIDFRAME_GET_SIZE; +extern unsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS; +extern unsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS; +extern unsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL; +extern unsigned IOCTL_RAIDFRAME_SET_COMPONENT_LABEL; +extern unsigned IOCTL_RAIDFRAME_INIT_LABELS; +extern unsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE; +extern unsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE; +extern unsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE; +extern unsigned IOCTL_RAIDFRAME_CHECK_PARITY; +extern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS; +extern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS; +extern unsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG; +extern unsigned IOCTL_RAIDFRAME_SET_ROOT; +extern unsigned IOCTL_RAIDFRAME_DELETE_COMPONENT; +extern unsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE; +extern unsigned 
IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT; +extern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT; +extern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT; +extern unsigned IOCTL_RAIDFRAME_CONFIGURE; +extern unsigned IOCTL_RAIDFRAME_GET_INFO; +extern unsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS; +extern unsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE; +extern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE; +extern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS; +extern unsigned IOCTL_RAIDFRAME_SET_LAST_UNIT; +extern unsigned IOCTL_MBPPIOCSPARAM; +extern unsigned IOCTL_MBPPIOCGPARAM; +extern unsigned IOCTL_MBPPIOCGSTAT; +extern unsigned IOCTL_SESIOC_GETNOBJ; +extern unsigned IOCTL_SESIOC_GETOBJMAP; +extern unsigned IOCTL_SESIOC_GETENCSTAT; +extern unsigned IOCTL_SESIOC_SETENCSTAT; +extern unsigned IOCTL_SESIOC_GETOBJSTAT; +extern unsigned IOCTL_SESIOC_SETOBJSTAT; +extern unsigned IOCTL_SESIOC_GETTEXT; +extern unsigned IOCTL_SESIOC_INIT; +extern unsigned IOCTL_SUN_DKIOCGGEOM; +extern unsigned IOCTL_SUN_DKIOCINFO; +extern unsigned IOCTL_SUN_DKIOCGPART; +extern unsigned IOCTL_FBIOGTYPE; +extern unsigned IOCTL_FBIOPUTCMAP; +extern unsigned IOCTL_FBIOGETCMAP; +extern unsigned IOCTL_FBIOGATTR; +extern unsigned IOCTL_FBIOSVIDEO; +extern unsigned IOCTL_FBIOGVIDEO; +extern unsigned IOCTL_FBIOSCURSOR; +extern unsigned IOCTL_FBIOGCURSOR; +extern unsigned IOCTL_FBIOSCURPOS; +extern unsigned IOCTL_FBIOGCURPOS; +extern unsigned IOCTL_FBIOGCURMAX; +extern unsigned IOCTL_KIOCTRANS; +extern unsigned IOCTL_KIOCSETKEY; +extern unsigned IOCTL_KIOCGETKEY; +extern unsigned IOCTL_KIOCGTRANS; +extern unsigned IOCTL_KIOCCMD; +extern unsigned IOCTL_KIOCTYPE; +extern unsigned IOCTL_KIOCSDIRECT; +extern unsigned IOCTL_KIOCSKEY; +extern unsigned IOCTL_KIOCGKEY; +extern unsigned IOCTL_KIOCSLED; +extern unsigned IOCTL_KIOCGLED; +extern unsigned IOCTL_KIOCLAYOUT; +extern unsigned IOCTL_VUIDSFORMAT; +extern unsigned IOCTL_VUIDGFORMAT; +extern unsigned IOCTL_STICIO_GXINFO; +extern unsigned 
IOCTL_STICIO_RESET; +extern unsigned IOCTL_STICIO_STARTQ; +extern unsigned IOCTL_STICIO_STOPQ; +extern unsigned IOCTL_UKYOPON_IDENTIFY; +extern unsigned IOCTL_URIO_SEND_COMMAND; +extern unsigned IOCTL_URIO_RECV_COMMAND; +extern unsigned IOCTL_USB_REQUEST; +extern unsigned IOCTL_USB_SETDEBUG; +extern unsigned IOCTL_USB_DISCOVER; +extern unsigned IOCTL_USB_DEVICEINFO; +extern unsigned IOCTL_USB_DEVICEINFO_OLD; +extern unsigned IOCTL_USB_DEVICESTATS; +extern unsigned IOCTL_USB_GET_REPORT_DESC; +extern unsigned IOCTL_USB_SET_IMMED; +extern unsigned IOCTL_USB_GET_REPORT; +extern unsigned IOCTL_USB_SET_REPORT; +extern unsigned IOCTL_USB_GET_REPORT_ID; +extern unsigned IOCTL_USB_GET_CONFIG; +extern unsigned IOCTL_USB_SET_CONFIG; +extern unsigned IOCTL_USB_GET_ALTINTERFACE; +extern unsigned IOCTL_USB_SET_ALTINTERFACE; +extern unsigned IOCTL_USB_GET_NO_ALT; +extern unsigned IOCTL_USB_GET_DEVICE_DESC; +extern unsigned IOCTL_USB_GET_CONFIG_DESC; +extern unsigned IOCTL_USB_GET_INTERFACE_DESC; +extern unsigned IOCTL_USB_GET_ENDPOINT_DESC; +extern unsigned IOCTL_USB_GET_FULL_DESC; +extern unsigned IOCTL_USB_GET_STRING_DESC; +extern unsigned IOCTL_USB_DO_REQUEST; +extern unsigned IOCTL_USB_GET_DEVICEINFO; +extern unsigned IOCTL_USB_GET_DEVICEINFO_OLD; +extern unsigned IOCTL_USB_SET_SHORT_XFER; +extern unsigned IOCTL_USB_SET_TIMEOUT; +extern unsigned IOCTL_USB_SET_BULK_RA; +extern unsigned IOCTL_USB_SET_BULK_WB; +extern unsigned IOCTL_USB_SET_BULK_RA_OPT; +extern unsigned IOCTL_USB_SET_BULK_WB_OPT; +extern unsigned IOCTL_USB_GET_CM_OVER_DATA; +extern unsigned IOCTL_USB_SET_CM_OVER_DATA; +extern unsigned IOCTL_UTOPPYIOTURBO; +extern unsigned IOCTL_UTOPPYIOCANCEL; +extern unsigned IOCTL_UTOPPYIOREBOOT; +extern unsigned IOCTL_UTOPPYIOSTATS; +extern unsigned IOCTL_UTOPPYIORENAME; +extern unsigned IOCTL_UTOPPYIOMKDIR; +extern unsigned IOCTL_UTOPPYIODELETE; +extern unsigned IOCTL_UTOPPYIOREADDIR; +extern unsigned IOCTL_UTOPPYIOREADFILE; +extern unsigned IOCTL_UTOPPYIOWRITEFILE; +extern 
unsigned IOCTL_DIOSXDCMD; +extern unsigned IOCTL_VT_OPENQRY; +extern unsigned IOCTL_VT_SETMODE; +extern unsigned IOCTL_VT_GETMODE; +extern unsigned IOCTL_VT_RELDISP; +extern unsigned IOCTL_VT_ACTIVATE; +extern unsigned IOCTL_VT_WAITACTIVE; +extern unsigned IOCTL_VT_GETACTIVE; +extern unsigned IOCTL_VT_GETSTATE; +extern unsigned IOCTL_KDGETKBENT; +extern unsigned IOCTL_KDGKBMODE; +extern unsigned IOCTL_KDSKBMODE; +extern unsigned IOCTL_KDMKTONE; +extern unsigned IOCTL_KDSETMODE; +extern unsigned IOCTL_KDENABIO; +extern unsigned IOCTL_KDDISABIO; +extern unsigned IOCTL_KDGKBTYPE; +extern unsigned IOCTL_KDGETLED; +extern unsigned IOCTL_KDSETLED; +extern unsigned IOCTL_KDSETRAD; +extern unsigned IOCTL_VGAPCVTID; +extern unsigned IOCTL_CONS_GETVERS; +extern unsigned IOCTL_WSKBDIO_GTYPE; +extern unsigned IOCTL_WSKBDIO_BELL; +extern unsigned IOCTL_WSKBDIO_COMPLEXBELL; +extern unsigned IOCTL_WSKBDIO_SETBELL; +extern unsigned IOCTL_WSKBDIO_GETBELL; +extern unsigned IOCTL_WSKBDIO_SETDEFAULTBELL; +extern unsigned IOCTL_WSKBDIO_GETDEFAULTBELL; +extern unsigned IOCTL_WSKBDIO_SETKEYREPEAT; +extern unsigned IOCTL_WSKBDIO_GETKEYREPEAT; +extern unsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT; +extern unsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT; +extern unsigned IOCTL_WSKBDIO_SETLEDS; +extern unsigned IOCTL_WSKBDIO_GETLEDS; +extern unsigned IOCTL_WSKBDIO_GETMAP; +extern unsigned IOCTL_WSKBDIO_SETMAP; +extern unsigned IOCTL_WSKBDIO_GETENCODING; +extern unsigned IOCTL_WSKBDIO_SETENCODING; +extern unsigned IOCTL_WSKBDIO_SETMODE; +extern unsigned IOCTL_WSKBDIO_GETMODE; +extern unsigned IOCTL_WSKBDIO_SETKEYCLICK; +extern unsigned IOCTL_WSKBDIO_GETKEYCLICK; +extern unsigned IOCTL_WSKBDIO_GETSCROLL; +extern unsigned IOCTL_WSKBDIO_SETSCROLL; +extern unsigned IOCTL_WSKBDIO_SETVERSION; +extern unsigned IOCTL_WSMOUSEIO_GTYPE; +extern unsigned IOCTL_WSMOUSEIO_SRES; +extern unsigned IOCTL_WSMOUSEIO_SSCALE; +extern unsigned IOCTL_WSMOUSEIO_SRATE; +extern unsigned IOCTL_WSMOUSEIO_SCALIBCOORDS; +extern 
unsigned IOCTL_WSMOUSEIO_GCALIBCOORDS; +extern unsigned IOCTL_WSMOUSEIO_GETID; +extern unsigned IOCTL_WSMOUSEIO_GETREPEAT; +extern unsigned IOCTL_WSMOUSEIO_SETREPEAT; +extern unsigned IOCTL_WSMOUSEIO_SETVERSION; +extern unsigned IOCTL_WSDISPLAYIO_GTYPE; +extern unsigned IOCTL_WSDISPLAYIO_GINFO; +extern unsigned IOCTL_WSDISPLAYIO_GETCMAP; +extern unsigned IOCTL_WSDISPLAYIO_PUTCMAP; +extern unsigned IOCTL_WSDISPLAYIO_GVIDEO; +extern unsigned IOCTL_WSDISPLAYIO_SVIDEO; +extern unsigned IOCTL_WSDISPLAYIO_GCURPOS; +extern unsigned IOCTL_WSDISPLAYIO_SCURPOS; +extern unsigned IOCTL_WSDISPLAYIO_GCURMAX; +extern unsigned IOCTL_WSDISPLAYIO_GCURSOR; +extern unsigned IOCTL_WSDISPLAYIO_SCURSOR; +extern unsigned IOCTL_WSDISPLAYIO_GMODE; +extern unsigned IOCTL_WSDISPLAYIO_SMODE; +extern unsigned IOCTL_WSDISPLAYIO_LDFONT; +extern unsigned IOCTL_WSDISPLAYIO_ADDSCREEN; +extern unsigned IOCTL_WSDISPLAYIO_DELSCREEN; +extern unsigned IOCTL_WSDISPLAYIO_SFONT; +extern unsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD; +extern unsigned IOCTL_WSDISPLAYIO_GETPARAM; +extern unsigned IOCTL_WSDISPLAYIO_SETPARAM; +extern unsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN; +extern unsigned IOCTL_WSDISPLAYIO_GETWSCHAR; +extern unsigned IOCTL_WSDISPLAYIO_PUTWSCHAR; +extern unsigned IOCTL_WSDISPLAYIO_DGSCROLL; +extern unsigned IOCTL_WSDISPLAYIO_DSSCROLL; +extern unsigned IOCTL_WSDISPLAYIO_GMSGATTRS; +extern unsigned IOCTL_WSDISPLAYIO_SMSGATTRS; +extern unsigned IOCTL_WSDISPLAYIO_GBORDER; +extern unsigned IOCTL_WSDISPLAYIO_SBORDER; +extern unsigned IOCTL_WSDISPLAYIO_SSPLASH; +extern unsigned IOCTL_WSDISPLAYIO_SPROGRESS; +extern unsigned IOCTL_WSDISPLAYIO_LINEBYTES; +extern unsigned IOCTL_WSDISPLAYIO_SETVERSION; +extern unsigned IOCTL_WSMUXIO_ADD_DEVICE; +extern unsigned IOCTL_WSMUXIO_REMOVE_DEVICE; +extern unsigned IOCTL_WSMUXIO_LIST_DEVICES; +extern unsigned IOCTL_WSMUXIO_INJECTEVENT; +extern unsigned IOCTL_WSDISPLAYIO_GET_BUSID; +extern unsigned IOCTL_WSDISPLAYIO_GET_EDID; +extern unsigned 
IOCTL_WSDISPLAYIO_SET_POLLING; +extern unsigned IOCTL_WSDISPLAYIO_GET_FBINFO; +extern unsigned IOCTL_WSDISPLAYIO_DOBLIT; +extern unsigned IOCTL_WSDISPLAYIO_WAITBLIT; +extern unsigned IOCTL_BIOCLOCATE; +extern unsigned IOCTL_BIOCINQ; +extern unsigned IOCTL_BIOCDISK_NOVOL; +extern unsigned IOCTL_BIOCDISK; +extern unsigned IOCTL_BIOCVOL; +extern unsigned IOCTL_BIOCALARM; +extern unsigned IOCTL_BIOCBLINK; +extern unsigned IOCTL_BIOCSETSTATE; +extern unsigned IOCTL_BIOCVOLOPS; +extern unsigned IOCTL_MD_GETCONF; +extern unsigned IOCTL_MD_SETCONF; +extern unsigned IOCTL_CCDIOCSET; +extern unsigned IOCTL_CCDIOCCLR; +extern unsigned IOCTL_CGDIOCSET; +extern unsigned IOCTL_CGDIOCCLR; +extern unsigned IOCTL_CGDIOCGET; +extern unsigned IOCTL_FSSIOCSET; +extern unsigned IOCTL_FSSIOCGET; +extern unsigned IOCTL_FSSIOCCLR; +extern unsigned IOCTL_FSSIOFSET; +extern unsigned IOCTL_FSSIOFGET; +extern unsigned IOCTL_BTDEV_ATTACH; +extern unsigned IOCTL_BTDEV_DETACH; +extern unsigned IOCTL_BTSCO_GETINFO; +extern unsigned IOCTL_KTTCP_IO_SEND; +extern unsigned IOCTL_KTTCP_IO_RECV; +extern unsigned IOCTL_IOC_LOCKSTAT_GVERSION; +extern unsigned IOCTL_IOC_LOCKSTAT_ENABLE; +extern unsigned IOCTL_IOC_LOCKSTAT_DISABLE; +extern unsigned IOCTL_VNDIOCSET; +extern unsigned IOCTL_VNDIOCCLR; +extern unsigned IOCTL_VNDIOCGET; +extern unsigned IOCTL_SPKRTONE; +extern unsigned IOCTL_SPKRTUNE; +extern unsigned IOCTL_SPKRGETVOL; +extern unsigned IOCTL_SPKRSETVOL; +extern unsigned IOCTL_BIOCGBLEN; +extern unsigned IOCTL_BIOCSBLEN; +extern unsigned IOCTL_BIOCSETF; +extern unsigned IOCTL_BIOCFLUSH; +extern unsigned IOCTL_BIOCPROMISC; +extern unsigned IOCTL_BIOCGDLT; +extern unsigned IOCTL_BIOCGETIF; +extern unsigned IOCTL_BIOCSETIF; +extern unsigned IOCTL_BIOCGSTATS; +extern unsigned IOCTL_BIOCGSTATSOLD; +extern unsigned IOCTL_BIOCIMMEDIATE; +extern unsigned IOCTL_BIOCVERSION; +extern unsigned IOCTL_BIOCSTCPF; +extern unsigned IOCTL_BIOCSUDPF; +extern unsigned IOCTL_BIOCGHDRCMPLT; +extern unsigned 
IOCTL_BIOCSHDRCMPLT; +extern unsigned IOCTL_BIOCSDLT; +extern unsigned IOCTL_BIOCGDLTLIST; +extern unsigned IOCTL_BIOCGSEESENT; +extern unsigned IOCTL_BIOCSSEESENT; +extern unsigned IOCTL_BIOCSRTIMEOUT; +extern unsigned IOCTL_BIOCGRTIMEOUT; +extern unsigned IOCTL_BIOCGFEEDBACK; +extern unsigned IOCTL_BIOCSFEEDBACK; +extern unsigned IOCTL_SIOCRAWATM; +extern unsigned IOCTL_SIOCATMENA; +extern unsigned IOCTL_SIOCATMDIS; +extern unsigned IOCTL_SIOCSPVCTX; +extern unsigned IOCTL_SIOCGPVCTX; +extern unsigned IOCTL_SIOCSPVCSIF; +extern unsigned IOCTL_SIOCGPVCSIF; +extern unsigned IOCTL_GRESADDRS; +extern unsigned IOCTL_GRESADDRD; +extern unsigned IOCTL_GREGADDRS; +extern unsigned IOCTL_GREGADDRD; +extern unsigned IOCTL_GRESPROTO; +extern unsigned IOCTL_GREGPROTO; +extern unsigned IOCTL_GRESSOCK; +extern unsigned IOCTL_GREDSOCK; +extern unsigned IOCTL_PPPIOCGRAWIN; +extern unsigned IOCTL_PPPIOCGFLAGS; +extern unsigned IOCTL_PPPIOCSFLAGS; +extern unsigned IOCTL_PPPIOCGASYNCMAP; +extern unsigned IOCTL_PPPIOCSASYNCMAP; +extern unsigned IOCTL_PPPIOCGUNIT; +extern unsigned IOCTL_PPPIOCGRASYNCMAP; +extern unsigned IOCTL_PPPIOCSRASYNCMAP; +extern unsigned IOCTL_PPPIOCGMRU; +extern unsigned IOCTL_PPPIOCSMRU; +extern unsigned IOCTL_PPPIOCSMAXCID; +extern unsigned IOCTL_PPPIOCGXASYNCMAP; +extern unsigned IOCTL_PPPIOCSXASYNCMAP; +extern unsigned IOCTL_PPPIOCXFERUNIT; +extern unsigned IOCTL_PPPIOCSCOMPRESS; +extern unsigned IOCTL_PPPIOCGNPMODE; +extern unsigned IOCTL_PPPIOCSNPMODE; +extern unsigned IOCTL_PPPIOCGIDLE; +extern unsigned IOCTL_PPPIOCGMTU; +extern unsigned IOCTL_PPPIOCSMTU; +extern unsigned IOCTL_SIOCGPPPSTATS; +extern unsigned IOCTL_SIOCGPPPCSTATS; +extern unsigned IOCTL_IOC_NPF_VERSION; +extern unsigned IOCTL_IOC_NPF_SWITCH; +extern unsigned IOCTL_IOC_NPF_LOAD; +extern unsigned IOCTL_IOC_NPF_TABLE; +extern unsigned IOCTL_IOC_NPF_STATS; +extern unsigned IOCTL_IOC_NPF_SAVE; +extern unsigned IOCTL_IOC_NPF_RULE; +extern unsigned IOCTL_IOC_NPF_CONN_LOOKUP; +extern unsigned 
IOCTL_PPPOESETPARMS; +extern unsigned IOCTL_PPPOEGETPARMS; +extern unsigned IOCTL_PPPOEGETSESSION; +extern unsigned IOCTL_SPPPGETAUTHCFG; +extern unsigned IOCTL_SPPPSETAUTHCFG; +extern unsigned IOCTL_SPPPGETLCPCFG; +extern unsigned IOCTL_SPPPSETLCPCFG; +extern unsigned IOCTL_SPPPGETSTATUS; +extern unsigned IOCTL_SPPPGETSTATUSNCP; +extern unsigned IOCTL_SPPPGETIDLETO; +extern unsigned IOCTL_SPPPSETIDLETO; +extern unsigned IOCTL_SPPPGETAUTHFAILURES; +extern unsigned IOCTL_SPPPSETAUTHFAILURE; +extern unsigned IOCTL_SPPPSETDNSOPTS; +extern unsigned IOCTL_SPPPGETDNSOPTS; +extern unsigned IOCTL_SPPPGETDNSADDRS; +extern unsigned IOCTL_SPPPSETKEEPALIVE; +extern unsigned IOCTL_SPPPGETKEEPALIVE; +extern unsigned IOCTL_SRT_GETNRT; +extern unsigned IOCTL_SRT_GETRT; +extern unsigned IOCTL_SRT_SETRT; +extern unsigned IOCTL_SRT_DELRT; +extern unsigned IOCTL_SRT_SFLAGS; +extern unsigned IOCTL_SRT_GFLAGS; +extern unsigned IOCTL_SRT_SGFLAGS; +extern unsigned IOCTL_SRT_DEBUG; +extern unsigned IOCTL_TAPGIFNAME; +extern unsigned IOCTL_TUNSDEBUG; +extern unsigned IOCTL_TUNGDEBUG; +extern unsigned IOCTL_TUNSIFMODE; +extern unsigned IOCTL_TUNSLMODE; +extern unsigned IOCTL_TUNSIFHEAD; +extern unsigned IOCTL_TUNGIFHEAD; +extern unsigned IOCTL_DIOCSTART; +extern unsigned IOCTL_DIOCSTOP; +extern unsigned IOCTL_DIOCADDRULE; +extern unsigned IOCTL_DIOCGETRULES; +extern unsigned IOCTL_DIOCGETRULE; +extern unsigned IOCTL_DIOCSETLCK; +extern unsigned IOCTL_DIOCCLRSTATES; +extern unsigned IOCTL_DIOCGETSTATE; +extern unsigned IOCTL_DIOCSETSTATUSIF; +extern unsigned IOCTL_DIOCGETSTATUS; +extern unsigned IOCTL_DIOCCLRSTATUS; +extern unsigned IOCTL_DIOCNATLOOK; +extern unsigned IOCTL_DIOCSETDEBUG; +extern unsigned IOCTL_DIOCGETSTATES; +extern unsigned IOCTL_DIOCCHANGERULE; +extern unsigned IOCTL_DIOCSETTIMEOUT; +extern unsigned IOCTL_DIOCGETTIMEOUT; +extern unsigned IOCTL_DIOCADDSTATE; +extern unsigned IOCTL_DIOCCLRRULECTRS; +extern unsigned IOCTL_DIOCGETLIMIT; +extern unsigned IOCTL_DIOCSETLIMIT; 
+extern unsigned IOCTL_DIOCKILLSTATES; +extern unsigned IOCTL_DIOCSTARTALTQ; +extern unsigned IOCTL_DIOCSTOPALTQ; +extern unsigned IOCTL_DIOCADDALTQ; +extern unsigned IOCTL_DIOCGETALTQS; +extern unsigned IOCTL_DIOCGETALTQ; +extern unsigned IOCTL_DIOCCHANGEALTQ; +extern unsigned IOCTL_DIOCGETQSTATS; +extern unsigned IOCTL_DIOCBEGINADDRS; +extern unsigned IOCTL_DIOCADDADDR; +extern unsigned IOCTL_DIOCGETADDRS; +extern unsigned IOCTL_DIOCGETADDR; +extern unsigned IOCTL_DIOCCHANGEADDR; +extern unsigned IOCTL_DIOCADDSTATES; +extern unsigned IOCTL_DIOCGETRULESETS; +extern unsigned IOCTL_DIOCGETRULESET; +extern unsigned IOCTL_DIOCRCLRTABLES; +extern unsigned IOCTL_DIOCRADDTABLES; +extern unsigned IOCTL_DIOCRDELTABLES; +extern unsigned IOCTL_DIOCRGETTABLES; +extern unsigned IOCTL_DIOCRGETTSTATS; +extern unsigned IOCTL_DIOCRCLRTSTATS; +extern unsigned IOCTL_DIOCRCLRADDRS; +extern unsigned IOCTL_DIOCRADDADDRS; +extern unsigned IOCTL_DIOCRDELADDRS; +extern unsigned IOCTL_DIOCRSETADDRS; +extern unsigned IOCTL_DIOCRGETADDRS; +extern unsigned IOCTL_DIOCRGETASTATS; +extern unsigned IOCTL_DIOCRCLRASTATS; +extern unsigned IOCTL_DIOCRTSTADDRS; +extern unsigned IOCTL_DIOCRSETTFLAGS; +extern unsigned IOCTL_DIOCRINADEFINE; +extern unsigned IOCTL_DIOCOSFPFLUSH; +extern unsigned IOCTL_DIOCOSFPADD; +extern unsigned IOCTL_DIOCOSFPGET; +extern unsigned IOCTL_DIOCXBEGIN; +extern unsigned IOCTL_DIOCXCOMMIT; +extern unsigned IOCTL_DIOCXROLLBACK; +extern unsigned IOCTL_DIOCGETSRCNODES; +extern unsigned IOCTL_DIOCCLRSRCNODES; +extern unsigned IOCTL_DIOCSETHOSTID; +extern unsigned IOCTL_DIOCIGETIFACES; +extern unsigned IOCTL_DIOCSETIFFLAG; +extern unsigned IOCTL_DIOCCLRIFFLAG; +extern unsigned IOCTL_DIOCKILLSRCNODES; +extern unsigned IOCTL_SLIOCGUNIT; +extern unsigned IOCTL_SIOCGBTINFO; +extern unsigned IOCTL_SIOCGBTINFOA; +extern unsigned IOCTL_SIOCNBTINFO; +extern unsigned IOCTL_SIOCSBTFLAGS; +extern unsigned IOCTL_SIOCSBTPOLICY; +extern unsigned IOCTL_SIOCSBTPTYPE; +extern unsigned 
IOCTL_SIOCGBTSTATS; +extern unsigned IOCTL_SIOCZBTSTATS; +extern unsigned IOCTL_SIOCBTDUMP; +extern unsigned IOCTL_SIOCSBTSCOMTU; +extern unsigned IOCTL_SIOCGBTFEAT; +extern unsigned IOCTL_SIOCADNAT; +extern unsigned IOCTL_SIOCRMNAT; +extern unsigned IOCTL_SIOCGNATS; +extern unsigned IOCTL_SIOCGNATL; +extern unsigned IOCTL_SIOCPURGENAT; +extern unsigned IOCTL_SIOCSIFINFO_FLAGS; +extern unsigned IOCTL_SIOCAADDRCTL_POLICY; +extern unsigned IOCTL_SIOCDADDRCTL_POLICY; +extern unsigned IOCTL_SMBIOC_OPENSESSION; +extern unsigned IOCTL_SMBIOC_OPENSHARE; +extern unsigned IOCTL_SMBIOC_REQUEST; +extern unsigned IOCTL_SMBIOC_SETFLAGS; +extern unsigned IOCTL_SMBIOC_LOOKUP; +extern unsigned IOCTL_SMBIOC_READ; +extern unsigned IOCTL_SMBIOC_WRITE; +extern unsigned IOCTL_AGPIOC_INFO; +extern unsigned IOCTL_AGPIOC_ACQUIRE; +extern unsigned IOCTL_AGPIOC_RELEASE; +extern unsigned IOCTL_AGPIOC_SETUP; +extern unsigned IOCTL_AGPIOC_ALLOCATE; +extern unsigned IOCTL_AGPIOC_DEALLOCATE; +extern unsigned IOCTL_AGPIOC_BIND; +extern unsigned IOCTL_AGPIOC_UNBIND; +extern unsigned IOCTL_AUDIO_GETINFO; +extern unsigned IOCTL_AUDIO_SETINFO; +extern unsigned IOCTL_AUDIO_DRAIN; +extern unsigned IOCTL_AUDIO_FLUSH; +extern unsigned IOCTL_AUDIO_WSEEK; +extern unsigned IOCTL_AUDIO_RERROR; +extern unsigned IOCTL_AUDIO_GETDEV; +extern unsigned IOCTL_AUDIO_GETENC; +extern unsigned IOCTL_AUDIO_GETFD; +extern unsigned IOCTL_AUDIO_SETFD; +extern unsigned IOCTL_AUDIO_PERROR; +extern unsigned IOCTL_AUDIO_GETIOFFS; +extern unsigned IOCTL_AUDIO_GETOOFFS; +extern unsigned IOCTL_AUDIO_GETPROPS; +extern unsigned IOCTL_AUDIO_GETBUFINFO; +extern unsigned IOCTL_AUDIO_SETCHAN; +extern unsigned IOCTL_AUDIO_GETCHAN; +extern unsigned IOCTL_AUDIO_MIXER_READ; +extern unsigned IOCTL_AUDIO_MIXER_WRITE; +extern unsigned IOCTL_AUDIO_MIXER_DEVINFO; +extern unsigned IOCTL_ATAIOCCOMMAND; +extern unsigned IOCTL_ATABUSIOSCAN; +extern unsigned IOCTL_ATABUSIORESET; +extern unsigned IOCTL_ATABUSIODETACH; +extern unsigned 
IOCTL_CDIOCPLAYTRACKS; +extern unsigned IOCTL_CDIOCPLAYBLOCKS; +extern unsigned IOCTL_CDIOCREADSUBCHANNEL; +extern unsigned IOCTL_CDIOREADTOCHEADER; +extern unsigned IOCTL_CDIOREADTOCENTRIES; +extern unsigned IOCTL_CDIOREADMSADDR; +extern unsigned IOCTL_CDIOCSETPATCH; +extern unsigned IOCTL_CDIOCGETVOL; +extern unsigned IOCTL_CDIOCSETVOL; +extern unsigned IOCTL_CDIOCSETMONO; +extern unsigned IOCTL_CDIOCSETSTEREO; +extern unsigned IOCTL_CDIOCSETMUTE; +extern unsigned IOCTL_CDIOCSETLEFT; +extern unsigned IOCTL_CDIOCSETRIGHT; +extern unsigned IOCTL_CDIOCSETDEBUG; +extern unsigned IOCTL_CDIOCCLRDEBUG; +extern unsigned IOCTL_CDIOCPAUSE; +extern unsigned IOCTL_CDIOCRESUME; +extern unsigned IOCTL_CDIOCRESET; +extern unsigned IOCTL_CDIOCSTART; +extern unsigned IOCTL_CDIOCSTOP; +extern unsigned IOCTL_CDIOCEJECT; +extern unsigned IOCTL_CDIOCALLOW; +extern unsigned IOCTL_CDIOCPREVENT; +extern unsigned IOCTL_CDIOCCLOSE; +extern unsigned IOCTL_CDIOCPLAYMSF; +extern unsigned IOCTL_CDIOCLOADUNLOAD; +extern unsigned IOCTL_CHIOMOVE; +extern unsigned IOCTL_CHIOEXCHANGE; +extern unsigned IOCTL_CHIOPOSITION; +extern unsigned IOCTL_CHIOGPICKER; +extern unsigned IOCTL_CHIOSPICKER; +extern unsigned IOCTL_CHIOGPARAMS; +extern unsigned IOCTL_CHIOIELEM; +extern unsigned IOCTL_OCHIOGSTATUS; +extern unsigned IOCTL_CHIOGSTATUS; +extern unsigned IOCTL_CHIOSVOLTAG; +extern unsigned IOCTL_CLOCKCTL_SETTIMEOFDAY; +extern unsigned IOCTL_CLOCKCTL_ADJTIME; +extern unsigned IOCTL_CLOCKCTL_CLOCK_SETTIME; +extern unsigned IOCTL_CLOCKCTL_NTP_ADJTIME; +extern unsigned IOCTL_IOC_CPU_SETSTATE; +extern unsigned IOCTL_IOC_CPU_GETSTATE; +extern unsigned IOCTL_IOC_CPU_GETCOUNT; +extern unsigned IOCTL_IOC_CPU_MAPID; +extern unsigned IOCTL_IOC_CPU_UCODE_GET_VERSION; +extern unsigned IOCTL_IOC_CPU_UCODE_APPLY; +extern unsigned IOCTL_DIOCGDINFO; +extern unsigned IOCTL_DIOCSDINFO; +extern unsigned IOCTL_DIOCWDINFO; +extern unsigned IOCTL_DIOCRFORMAT; +extern unsigned IOCTL_DIOCWFORMAT; +extern unsigned 
IOCTL_DIOCSSTEP; +extern unsigned IOCTL_DIOCSRETRIES; +extern unsigned IOCTL_DIOCKLABEL; +extern unsigned IOCTL_DIOCWLABEL; +extern unsigned IOCTL_DIOCSBAD; +extern unsigned IOCTL_DIOCEJECT; +extern unsigned IOCTL_ODIOCEJECT; +extern unsigned IOCTL_DIOCLOCK; +extern unsigned IOCTL_DIOCGDEFLABEL; +extern unsigned IOCTL_DIOCCLRLABEL; +extern unsigned IOCTL_DIOCGCACHE; +extern unsigned IOCTL_DIOCSCACHE; +extern unsigned IOCTL_DIOCCACHESYNC; +extern unsigned IOCTL_DIOCBSLIST; +extern unsigned IOCTL_DIOCBSFLUSH; +extern unsigned IOCTL_DIOCAWEDGE; +extern unsigned IOCTL_DIOCGWEDGEINFO; +extern unsigned IOCTL_DIOCDWEDGE; +extern unsigned IOCTL_DIOCLWEDGES; +extern unsigned IOCTL_DIOCGSTRATEGY; +extern unsigned IOCTL_DIOCSSTRATEGY; +extern unsigned IOCTL_DIOCGDISKINFO; +extern unsigned IOCTL_DIOCTUR; +extern unsigned IOCTL_DIOCMWEDGES; +extern unsigned IOCTL_DIOCGSECTORSIZE; +extern unsigned IOCTL_DIOCGMEDIASIZE; +extern unsigned IOCTL_DRVDETACHDEV; +extern unsigned IOCTL_DRVRESCANBUS; +extern unsigned IOCTL_DRVCTLCOMMAND; +extern unsigned IOCTL_DRVRESUMEDEV; +extern unsigned IOCTL_DRVLISTDEV; +extern unsigned IOCTL_DRVGETEVENT; +extern unsigned IOCTL_DRVSUSPENDDEV; +extern unsigned IOCTL_DVD_READ_STRUCT; +extern unsigned IOCTL_DVD_WRITE_STRUCT; +extern unsigned IOCTL_DVD_AUTH; +extern unsigned IOCTL_ENVSYS_GETDICTIONARY; +extern unsigned IOCTL_ENVSYS_SETDICTIONARY; +extern unsigned IOCTL_ENVSYS_REMOVEPROPS; +extern unsigned IOCTL_ENVSYS_GTREDATA; +extern unsigned IOCTL_ENVSYS_GTREINFO; +extern unsigned IOCTL_KFILTER_BYFILTER; +extern unsigned IOCTL_KFILTER_BYNAME; +extern unsigned IOCTL_FDIOCGETOPTS; +extern unsigned IOCTL_FDIOCSETOPTS; +extern unsigned IOCTL_FDIOCSETFORMAT; +extern unsigned IOCTL_FDIOCGETFORMAT; +extern unsigned IOCTL_FDIOCFORMAT_TRACK; extern unsigned IOCTL_FIOCLEX; -extern unsigned IOCTL_FIOGETOWN; -extern unsigned IOCTL_FIONBIO; extern unsigned IOCTL_FIONCLEX; +extern unsigned IOCTL_FIONREAD; +extern unsigned IOCTL_FIONBIO; +extern unsigned 
IOCTL_FIOASYNC; extern unsigned IOCTL_FIOSETOWN; -extern unsigned IOCTL_SIOCADDMULTI; +extern unsigned IOCTL_FIOGETOWN; +extern unsigned IOCTL_OFIOGETBMAP; +extern unsigned IOCTL_FIOGETBMAP; +extern unsigned IOCTL_FIONWRITE; +extern unsigned IOCTL_FIONSPACE; +extern unsigned IOCTL_GPIOINFO; +extern unsigned IOCTL_GPIOSET; +extern unsigned IOCTL_GPIOUNSET; +extern unsigned IOCTL_GPIOREAD; +extern unsigned IOCTL_GPIOWRITE; +extern unsigned IOCTL_GPIOTOGGLE; +extern unsigned IOCTL_GPIOATTACH; +extern unsigned IOCTL_PTIOCNETBSD; +extern unsigned IOCTL_PTIOCSUNOS; +extern unsigned IOCTL_PTIOCLINUX; +extern unsigned IOCTL_PTIOCFREEBSD; +extern unsigned IOCTL_PTIOCULTRIX; +extern unsigned IOCTL_TIOCHPCL; +extern unsigned IOCTL_TIOCGETP; +extern unsigned IOCTL_TIOCSETP; +extern unsigned IOCTL_TIOCSETN; +extern unsigned IOCTL_TIOCSETC; +extern unsigned IOCTL_TIOCGETC; +extern unsigned IOCTL_TIOCLBIS; +extern unsigned IOCTL_TIOCLBIC; +extern unsigned IOCTL_TIOCLSET; +extern unsigned IOCTL_TIOCLGET; +extern unsigned IOCTL_TIOCSLTC; +extern unsigned IOCTL_TIOCGLTC; +extern unsigned IOCTL_OTIOCCONS; +extern unsigned IOCTL_JOY_SETTIMEOUT; +extern unsigned IOCTL_JOY_GETTIMEOUT; +extern unsigned IOCTL_JOY_SET_X_OFFSET; +extern unsigned IOCTL_JOY_SET_Y_OFFSET; +extern unsigned IOCTL_JOY_GET_X_OFFSET; +extern unsigned IOCTL_JOY_GET_Y_OFFSET; +extern unsigned IOCTL_OKIOCGSYMBOL; +extern unsigned IOCTL_OKIOCGVALUE; +extern unsigned IOCTL_KIOCGSIZE; +extern unsigned IOCTL_KIOCGVALUE; +extern unsigned IOCTL_KIOCGSYMBOL; +extern unsigned IOCTL_LUAINFO; +extern unsigned IOCTL_LUACREATE; +extern unsigned IOCTL_LUADESTROY; +extern unsigned IOCTL_LUAREQUIRE; +extern unsigned IOCTL_LUALOAD; +extern unsigned IOCTL_MIDI_PRETIME; +extern unsigned IOCTL_MIDI_MPUMODE; +extern unsigned IOCTL_MIDI_MPUCMD; +extern unsigned IOCTL_SEQUENCER_RESET; +extern unsigned IOCTL_SEQUENCER_SYNC; +extern unsigned IOCTL_SEQUENCER_INFO; +extern unsigned IOCTL_SEQUENCER_CTRLRATE; +extern unsigned 
IOCTL_SEQUENCER_GETOUTCOUNT; +extern unsigned IOCTL_SEQUENCER_GETINCOUNT; +extern unsigned IOCTL_SEQUENCER_RESETSAMPLES; +extern unsigned IOCTL_SEQUENCER_NRSYNTHS; +extern unsigned IOCTL_SEQUENCER_NRMIDIS; +extern unsigned IOCTL_SEQUENCER_THRESHOLD; +extern unsigned IOCTL_SEQUENCER_MEMAVL; +extern unsigned IOCTL_SEQUENCER_PANIC; +extern unsigned IOCTL_SEQUENCER_OUTOFBAND; +extern unsigned IOCTL_SEQUENCER_GETTIME; +extern unsigned IOCTL_SEQUENCER_TMR_TIMEBASE; +extern unsigned IOCTL_SEQUENCER_TMR_START; +extern unsigned IOCTL_SEQUENCER_TMR_STOP; +extern unsigned IOCTL_SEQUENCER_TMR_CONTINUE; +extern unsigned IOCTL_SEQUENCER_TMR_TEMPO; +extern unsigned IOCTL_SEQUENCER_TMR_SOURCE; +extern unsigned IOCTL_SEQUENCER_TMR_METRONOME; +extern unsigned IOCTL_SEQUENCER_TMR_SELECT; +extern unsigned IOCTL_MTIOCTOP; +extern unsigned IOCTL_MTIOCGET; +extern unsigned IOCTL_MTIOCIEOT; +extern unsigned IOCTL_MTIOCEEOT; +extern unsigned IOCTL_MTIOCRDSPOS; +extern unsigned IOCTL_MTIOCRDHPOS; +extern unsigned IOCTL_MTIOCSLOCATE; +extern unsigned IOCTL_MTIOCHLOCATE; +extern unsigned IOCTL_POWER_EVENT_RECVDICT; +extern unsigned IOCTL_POWER_IOC_GET_TYPE; +extern unsigned IOCTL_POWER_IOC_GET_TYPE_WITH_LOSSAGE; +extern unsigned IOCTL_RIOCGINFO; +extern unsigned IOCTL_RIOCSINFO; +extern unsigned IOCTL_RIOCSSRCH; +extern unsigned IOCTL_RNDGETENTCNT; +extern unsigned IOCTL_RNDGETSRCNUM; +extern unsigned IOCTL_RNDGETSRCNAME; +extern unsigned IOCTL_RNDCTL; +extern unsigned IOCTL_RNDADDDATA; +extern unsigned IOCTL_RNDGETPOOLSTAT; +extern unsigned IOCTL_RNDGETESTNUM; +extern unsigned IOCTL_RNDGETESTNAME; +extern unsigned IOCTL_SCIOCGET; +extern unsigned IOCTL_SCIOCSET; +extern unsigned IOCTL_SCIOCRESTART; +extern unsigned IOCTL_SCIOC_USE_ADF; +extern unsigned IOCTL_SCIOCCOMMAND; +extern unsigned IOCTL_SCIOCDEBUG; +extern unsigned IOCTL_SCIOCIDENTIFY; +extern unsigned IOCTL_OSCIOCIDENTIFY; +extern unsigned IOCTL_SCIOCDECONFIG; +extern unsigned IOCTL_SCIOCRECONFIG; +extern unsigned IOCTL_SCIOCRESET; 
+extern unsigned IOCTL_SCBUSIOSCAN; +extern unsigned IOCTL_SCBUSIORESET; +extern unsigned IOCTL_SCBUSIODETACH; +extern unsigned IOCTL_SCBUSACCEL; +extern unsigned IOCTL_SCBUSIOLLSCAN; +extern unsigned IOCTL_SIOCSHIWAT; +extern unsigned IOCTL_SIOCGHIWAT; +extern unsigned IOCTL_SIOCSLOWAT; +extern unsigned IOCTL_SIOCGLOWAT; extern unsigned IOCTL_SIOCATMARK; -extern unsigned IOCTL_SIOCDELMULTI; -extern unsigned IOCTL_SIOCGIFADDR; -extern unsigned IOCTL_SIOCGIFBRDADDR; -extern unsigned IOCTL_SIOCGIFCONF; -extern unsigned IOCTL_SIOCGIFDSTADDR; -extern unsigned IOCTL_SIOCGIFFLAGS; -extern unsigned IOCTL_SIOCGIFMETRIC; -extern unsigned IOCTL_SIOCGIFMTU; -extern unsigned IOCTL_SIOCGIFNETMASK; +extern unsigned IOCTL_SIOCSPGRP; extern unsigned IOCTL_SIOCGPGRP; +extern unsigned IOCTL_SIOCADDRT; +extern unsigned IOCTL_SIOCDELRT; extern unsigned IOCTL_SIOCSIFADDR; -extern unsigned IOCTL_SIOCSIFBRDADDR; +extern unsigned IOCTL_SIOCGIFADDR; extern unsigned IOCTL_SIOCSIFDSTADDR; +extern unsigned IOCTL_SIOCGIFDSTADDR; extern unsigned IOCTL_SIOCSIFFLAGS; +extern unsigned IOCTL_SIOCGIFFLAGS; +extern unsigned IOCTL_SIOCGIFBRDADDR; +extern unsigned IOCTL_SIOCSIFBRDADDR; +extern unsigned IOCTL_SIOCGIFCONF; +extern unsigned IOCTL_SIOCGIFNETMASK; +extern unsigned IOCTL_SIOCSIFNETMASK; +extern unsigned IOCTL_SIOCGIFMETRIC; extern unsigned IOCTL_SIOCSIFMETRIC; +extern unsigned IOCTL_SIOCDIFADDR; +extern unsigned IOCTL_SIOCAIFADDR; +extern unsigned IOCTL_SIOCGIFALIAS; +extern unsigned IOCTL_SIOCGIFAFLAG_IN; +extern unsigned IOCTL_SIOCALIFADDR; +extern unsigned IOCTL_SIOCGLIFADDR; +extern unsigned IOCTL_SIOCDLIFADDR; +extern unsigned IOCTL_SIOCSIFADDRPREF; +extern unsigned IOCTL_SIOCGIFADDRPREF; +extern unsigned IOCTL_SIOCADDMULTI; +extern unsigned IOCTL_SIOCDELMULTI; +extern unsigned IOCTL_SIOCGETVIFCNT; +extern unsigned IOCTL_SIOCGETSGCNT; +extern unsigned IOCTL_SIOCSIFMEDIA; +extern unsigned IOCTL_SIOCGIFMEDIA; +extern unsigned IOCTL_SIOCSIFGENERIC; +extern unsigned IOCTL_SIOCGIFGENERIC; 
+extern unsigned IOCTL_SIOCSIFPHYADDR; +extern unsigned IOCTL_SIOCGIFPSRCADDR; +extern unsigned IOCTL_SIOCGIFPDSTADDR; +extern unsigned IOCTL_SIOCDIFPHYADDR; +extern unsigned IOCTL_SIOCSLIFPHYADDR; +extern unsigned IOCTL_SIOCGLIFPHYADDR; extern unsigned IOCTL_SIOCSIFMTU; -extern unsigned IOCTL_SIOCSIFNETMASK; -extern unsigned IOCTL_SIOCSPGRP; -extern unsigned IOCTL_TIOCCONS; +extern unsigned IOCTL_SIOCGIFMTU; +extern unsigned IOCTL_SIOCSDRVSPEC; +extern unsigned IOCTL_SIOCGDRVSPEC; +extern unsigned IOCTL_SIOCIFCREATE; +extern unsigned IOCTL_SIOCIFDESTROY; +extern unsigned IOCTL_SIOCIFGCLONERS; +extern unsigned IOCTL_SIOCGIFDLT; +extern unsigned IOCTL_SIOCGIFCAP; +extern unsigned IOCTL_SIOCSIFCAP; +extern unsigned IOCTL_SIOCSVH; +extern unsigned IOCTL_SIOCGVH; +extern unsigned IOCTL_SIOCINITIFADDR; +extern unsigned IOCTL_SIOCGIFDATA; +extern unsigned IOCTL_SIOCZIFDATA; +extern unsigned IOCTL_SIOCGLINKSTR; +extern unsigned IOCTL_SIOCSLINKSTR; +extern unsigned IOCTL_SIOCGETHERCAP; +extern unsigned IOCTL_SIOCGIFINDEX; +extern unsigned IOCTL_SIOCSETPFSYNC; +extern unsigned IOCTL_SIOCGETPFSYNC; +extern unsigned IOCTL_PPS_IOC_CREATE; +extern unsigned IOCTL_PPS_IOC_DESTROY; +extern unsigned IOCTL_PPS_IOC_SETPARAMS; +extern unsigned IOCTL_PPS_IOC_GETPARAMS; +extern unsigned IOCTL_PPS_IOC_GETCAP; +extern unsigned IOCTL_PPS_IOC_FETCH; +extern unsigned IOCTL_PPS_IOC_KCBIND; extern unsigned IOCTL_TIOCEXCL; -extern unsigned IOCTL_TIOCGETD; -extern unsigned IOCTL_TIOCGPGRP; -extern unsigned IOCTL_TIOCGWINSZ; -extern unsigned IOCTL_TIOCMBIC; -extern unsigned IOCTL_TIOCMBIS; -extern unsigned IOCTL_TIOCMGET; -extern unsigned IOCTL_TIOCMSET; -extern unsigned IOCTL_TIOCNOTTY; extern unsigned IOCTL_TIOCNXCL; -extern unsigned IOCTL_TIOCOUTQ; -extern unsigned IOCTL_TIOCPKT; -extern unsigned IOCTL_TIOCSCTTY; +extern unsigned IOCTL_TIOCFLUSH; +extern unsigned IOCTL_TIOCGETA; +extern unsigned IOCTL_TIOCSETA; +extern unsigned IOCTL_TIOCSETAW; +extern unsigned IOCTL_TIOCSETAF; +extern 
unsigned IOCTL_TIOCGETD; extern unsigned IOCTL_TIOCSETD; +extern unsigned IOCTL_TIOCGLINED; +extern unsigned IOCTL_TIOCSLINED; +extern unsigned IOCTL_TIOCSBRK; +extern unsigned IOCTL_TIOCCBRK; +extern unsigned IOCTL_TIOCSDTR; +extern unsigned IOCTL_TIOCCDTR; +extern unsigned IOCTL_TIOCGPGRP; extern unsigned IOCTL_TIOCSPGRP; +extern unsigned IOCTL_TIOCOUTQ; extern unsigned IOCTL_TIOCSTI; +extern unsigned IOCTL_TIOCNOTTY; +extern unsigned IOCTL_TIOCPKT; +extern unsigned IOCTL_TIOCSTOP; +extern unsigned IOCTL_TIOCSTART; +extern unsigned IOCTL_TIOCMSET; +extern unsigned IOCTL_TIOCMBIS; +extern unsigned IOCTL_TIOCMBIC; +extern unsigned IOCTL_TIOCMGET; +extern unsigned IOCTL_TIOCREMOTE; +extern unsigned IOCTL_TIOCGWINSZ; extern unsigned IOCTL_TIOCSWINSZ; -extern unsigned IOCTL_SIOCGETSGCNT; -extern unsigned IOCTL_SIOCGETVIFCNT; -extern unsigned IOCTL_MTIOCGET; -extern unsigned IOCTL_MTIOCTOP; -extern unsigned IOCTL_SIOCADDRT; -extern unsigned IOCTL_SIOCDELRT; -extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE; -extern unsigned IOCTL_SNDCTL_DSP_GETFMTS; -extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK; -extern unsigned IOCTL_SNDCTL_DSP_POST; +extern unsigned IOCTL_TIOCUCNTL; +extern unsigned IOCTL_TIOCSTAT; +extern unsigned IOCTL_TIOCGSID; +extern unsigned IOCTL_TIOCCONS; +extern unsigned IOCTL_TIOCSCTTY; +extern unsigned IOCTL_TIOCEXT; +extern unsigned IOCTL_TIOCSIG; +extern unsigned IOCTL_TIOCDRAIN; +extern unsigned IOCTL_TIOCGFLAGS; +extern unsigned IOCTL_TIOCSFLAGS; +extern unsigned IOCTL_TIOCDCDTIMESTAMP; +extern unsigned IOCTL_TIOCRCVFRAME; +extern unsigned IOCTL_TIOCXMTFRAME; +extern unsigned IOCTL_TIOCPTMGET; +extern unsigned IOCTL_TIOCGRANTPT; +extern unsigned IOCTL_TIOCPTSNAME; +extern unsigned IOCTL_TIOCSQSIZE; +extern unsigned IOCTL_TIOCGQSIZE; +extern unsigned IOCTL_VERIEXEC_LOAD; +extern unsigned IOCTL_VERIEXEC_TABLESIZE; +extern unsigned IOCTL_VERIEXEC_DELETE; +extern unsigned IOCTL_VERIEXEC_QUERY; +extern unsigned IOCTL_VERIEXEC_DUMP; +extern unsigned 
IOCTL_VERIEXEC_FLUSH; +extern unsigned IOCTL_VIDIOC_QUERYCAP; +extern unsigned IOCTL_VIDIOC_RESERVED; +extern unsigned IOCTL_VIDIOC_ENUM_FMT; +extern unsigned IOCTL_VIDIOC_G_FMT; +extern unsigned IOCTL_VIDIOC_S_FMT; +extern unsigned IOCTL_VIDIOC_REQBUFS; +extern unsigned IOCTL_VIDIOC_QUERYBUF; +extern unsigned IOCTL_VIDIOC_G_FBUF; +extern unsigned IOCTL_VIDIOC_S_FBUF; +extern unsigned IOCTL_VIDIOC_OVERLAY; +extern unsigned IOCTL_VIDIOC_QBUF; +extern unsigned IOCTL_VIDIOC_DQBUF; +extern unsigned IOCTL_VIDIOC_STREAMON; +extern unsigned IOCTL_VIDIOC_STREAMOFF; +extern unsigned IOCTL_VIDIOC_G_PARM; +extern unsigned IOCTL_VIDIOC_S_PARM; +extern unsigned IOCTL_VIDIOC_G_STD; +extern unsigned IOCTL_VIDIOC_S_STD; +extern unsigned IOCTL_VIDIOC_ENUMSTD; +extern unsigned IOCTL_VIDIOC_ENUMINPUT; +extern unsigned IOCTL_VIDIOC_G_CTRL; +extern unsigned IOCTL_VIDIOC_S_CTRL; +extern unsigned IOCTL_VIDIOC_G_TUNER; +extern unsigned IOCTL_VIDIOC_S_TUNER; +extern unsigned IOCTL_VIDIOC_G_AUDIO; +extern unsigned IOCTL_VIDIOC_S_AUDIO; +extern unsigned IOCTL_VIDIOC_QUERYCTRL; +extern unsigned IOCTL_VIDIOC_QUERYMENU; +extern unsigned IOCTL_VIDIOC_G_INPUT; +extern unsigned IOCTL_VIDIOC_S_INPUT; +extern unsigned IOCTL_VIDIOC_G_OUTPUT; +extern unsigned IOCTL_VIDIOC_S_OUTPUT; +extern unsigned IOCTL_VIDIOC_ENUMOUTPUT; +extern unsigned IOCTL_VIDIOC_G_AUDOUT; +extern unsigned IOCTL_VIDIOC_S_AUDOUT; +extern unsigned IOCTL_VIDIOC_G_MODULATOR; +extern unsigned IOCTL_VIDIOC_S_MODULATOR; +extern unsigned IOCTL_VIDIOC_G_FREQUENCY; +extern unsigned IOCTL_VIDIOC_S_FREQUENCY; +extern unsigned IOCTL_VIDIOC_CROPCAP; +extern unsigned IOCTL_VIDIOC_G_CROP; +extern unsigned IOCTL_VIDIOC_S_CROP; +extern unsigned IOCTL_VIDIOC_G_JPEGCOMP; +extern unsigned IOCTL_VIDIOC_S_JPEGCOMP; +extern unsigned IOCTL_VIDIOC_QUERYSTD; +extern unsigned IOCTL_VIDIOC_TRY_FMT; +extern unsigned IOCTL_VIDIOC_ENUMAUDIO; +extern unsigned IOCTL_VIDIOC_ENUMAUDOUT; +extern unsigned IOCTL_VIDIOC_G_PRIORITY; +extern unsigned 
IOCTL_VIDIOC_S_PRIORITY; +extern unsigned IOCTL_VIDIOC_ENUM_FRAMESIZES; +extern unsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS; +extern unsigned IOCTL_WDOGIOC_GMODE; +extern unsigned IOCTL_WDOGIOC_SMODE; +extern unsigned IOCTL_WDOGIOC_WHICH; +extern unsigned IOCTL_WDOGIOC_TICKLE; +extern unsigned IOCTL_WDOGIOC_GTICKLER; +extern unsigned IOCTL_WDOGIOC_GWDOGS; extern unsigned IOCTL_SNDCTL_DSP_RESET; -extern unsigned IOCTL_SNDCTL_DSP_SETFMT; -extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT; +extern unsigned IOCTL_SNDCTL_DSP_SYNC; extern unsigned IOCTL_SNDCTL_DSP_SPEED; +extern unsigned IOCTL_SOUND_PCM_READ_RATE; extern unsigned IOCTL_SNDCTL_DSP_STEREO; -extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE; -extern unsigned IOCTL_SNDCTL_DSP_SYNC; -extern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE; -extern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR; -extern unsigned IOCTL_SNDCTL_MIDI_INFO; -extern unsigned IOCTL_SNDCTL_MIDI_PRETIME; -extern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE; -extern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT; -extern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT; -extern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS; -extern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS; -extern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND; -extern unsigned IOCTL_SNDCTL_SEQ_PANIC; -extern unsigned IOCTL_SNDCTL_SEQ_PERCMODE; -extern unsigned IOCTL_SNDCTL_SEQ_RESET; -extern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES; -extern unsigned IOCTL_SNDCTL_SEQ_SYNC; -extern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI; -extern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD; -extern unsigned IOCTL_SNDCTL_SYNTH_INFO; -extern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL; -extern unsigned IOCTL_SNDCTL_TMR_CONTINUE; -extern unsigned IOCTL_SNDCTL_TMR_METRONOME; -extern unsigned IOCTL_SNDCTL_TMR_SELECT; -extern unsigned IOCTL_SNDCTL_TMR_SOURCE; -extern unsigned IOCTL_SNDCTL_TMR_START; -extern unsigned IOCTL_SNDCTL_TMR_STOP; -extern unsigned IOCTL_SNDCTL_TMR_TEMPO; -extern unsigned IOCTL_SNDCTL_TMR_TIMEBASE; -extern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM; -extern unsigned 
IOCTL_SOUND_MIXER_READ_BASS; -extern unsigned IOCTL_SOUND_MIXER_READ_CAPS; -extern unsigned IOCTL_SOUND_MIXER_READ_CD; -extern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK; -extern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE; -extern unsigned IOCTL_SOUND_MIXER_READ_IGAIN; -extern unsigned IOCTL_SOUND_MIXER_READ_IMIX; -extern unsigned IOCTL_SOUND_MIXER_READ_LINE1; -extern unsigned IOCTL_SOUND_MIXER_READ_LINE2; -extern unsigned IOCTL_SOUND_MIXER_READ_LINE3; -extern unsigned IOCTL_SOUND_MIXER_READ_LINE; -extern unsigned IOCTL_SOUND_MIXER_READ_LOUD; -extern unsigned IOCTL_SOUND_MIXER_READ_MIC; -extern unsigned IOCTL_SOUND_MIXER_READ_MUTE; -extern unsigned IOCTL_SOUND_MIXER_READ_OGAIN; -extern unsigned IOCTL_SOUND_MIXER_READ_PCM; -extern unsigned IOCTL_SOUND_MIXER_READ_RECLEV; -extern unsigned IOCTL_SOUND_MIXER_READ_RECMASK; -extern unsigned IOCTL_SOUND_MIXER_READ_RECSRC; -extern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER; -extern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS; -extern unsigned IOCTL_SOUND_MIXER_READ_SYNTH; -extern unsigned IOCTL_SOUND_MIXER_READ_TREBLE; -extern unsigned IOCTL_SOUND_MIXER_READ_VOLUME; -extern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM; -extern unsigned IOCTL_SOUND_MIXER_WRITE_BASS; -extern unsigned IOCTL_SOUND_MIXER_WRITE_CD; -extern unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE; -extern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN; -extern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX; -extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1; -extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2; -extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3; -extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE; -extern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD; -extern unsigned IOCTL_SOUND_MIXER_WRITE_MIC; -extern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE; -extern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN; -extern unsigned IOCTL_SOUND_MIXER_WRITE_PCM; -extern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV; -extern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC; -extern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER; -extern unsigned 
IOCTL_SOUND_MIXER_WRITE_SYNTH; -extern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE; -extern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME; +extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE; +extern unsigned IOCTL_SNDCTL_DSP_SETFMT; extern unsigned IOCTL_SOUND_PCM_READ_BITS; +extern unsigned IOCTL_SNDCTL_DSP_CHANNELS; extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS; -extern unsigned IOCTL_SOUND_PCM_READ_FILTER; -extern unsigned IOCTL_SOUND_PCM_READ_RATE; -extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS; extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER; -extern unsigned IOCTL_VT_ACTIVATE; -extern unsigned IOCTL_VT_GETMODE; -extern unsigned IOCTL_VT_OPENQRY; -extern unsigned IOCTL_VT_RELDISP; -extern unsigned IOCTL_VT_SETMODE; -extern unsigned IOCTL_VT_WAITACTIVE; -extern unsigned IOCTL_KDDISABIO; -extern unsigned IOCTL_KDENABIO; -extern unsigned IOCTL_KDGETLED; -extern unsigned IOCTL_KDGKBMODE; -extern unsigned IOCTL_KDGKBTYPE; -extern unsigned IOCTL_KDMKTONE; -extern unsigned IOCTL_KDSETLED; -extern unsigned IOCTL_KDSETMODE; -extern unsigned IOCTL_KDSKBMODE; +extern unsigned IOCTL_SOUND_PCM_READ_FILTER; +extern unsigned IOCTL_SNDCTL_DSP_POST; +extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE; +extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT; +extern unsigned IOCTL_SNDCTL_DSP_GETFMTS; +extern unsigned IOCTL_SNDCTL_DSP_GETOSPACE; +extern unsigned IOCTL_SNDCTL_DSP_GETISPACE; +extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK; +extern unsigned IOCTL_SNDCTL_DSP_GETCAPS; +extern unsigned IOCTL_SNDCTL_DSP_GETTRIGGER; +extern unsigned IOCTL_SNDCTL_DSP_SETTRIGGER; +extern unsigned IOCTL_SNDCTL_DSP_GETIPTR; +extern unsigned IOCTL_SNDCTL_DSP_GETOPTR; +extern unsigned IOCTL_SNDCTL_DSP_MAPINBUF; +extern unsigned IOCTL_SNDCTL_DSP_MAPOUTBUF; +extern unsigned IOCTL_SNDCTL_DSP_SETSYNCRO; +extern unsigned IOCTL_SNDCTL_DSP_SETDUPLEX; +extern unsigned IOCTL_SNDCTL_DSP_PROFILE; +extern unsigned IOCTL_SNDCTL_DSP_GETODELAY; +extern unsigned IOCTL_SOUND_MIXER_INFO; +extern unsigned IOCTL_SOUND_OLD_MIXER_INFO; +extern unsigned 
IOCTL_OSS_GETVERSION; +extern unsigned IOCTL_SNDCTL_SYSINFO; +extern unsigned IOCTL_SNDCTL_AUDIOINFO; +extern unsigned IOCTL_SNDCTL_ENGINEINFO; +extern unsigned IOCTL_SNDCTL_DSP_GETPLAYVOL; +extern unsigned IOCTL_SNDCTL_DSP_SETPLAYVOL; +extern unsigned IOCTL_SNDCTL_DSP_GETRECVOL; +extern unsigned IOCTL_SNDCTL_DSP_SETRECVOL; +extern unsigned IOCTL_SNDCTL_DSP_SKIP; +extern unsigned IOCTL_SNDCTL_DSP_SILENCE; extern const int si_SEGV_MAPERR; extern const int si_SEGV_ACCERR; diff --git a/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc b/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc new file mode 100644 index 000000000000..7ab0ff60a24d --- /dev/null +++ b/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc @@ -0,0 +1,279 @@ +//===-- sanitizer_platform_limits_openbsd.cc ------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of Sanitizer common code. +// +// Sizes and layouts of platform-specific NetBSD data structures. 
+//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" + +#if SANITIZER_OPENBSD +#include <arpa/inet.h> +#include <dirent.h> +#include <glob.h> +#include <grp.h> +#include <ifaddrs.h> +#include <limits.h> +#include <link_elf.h> +#include <sys/socket.h> +#include <net/if.h> +#include <net/ppp_defs.h> +#include <net/route.h> +#include <netdb.h> +#include <netinet/in.h> +#include <netinet/ip_mroute.h> +#include <poll.h> +#include <pthread.h> +#include <pwd.h> +#include <semaphore.h> +#include <signal.h> +#include <soundcard.h> +#include <stddef.h> +#include <stdint.h> +#include <sys/filio.h> +#include <sys/ipc.h> +#include <sys/mman.h> +#include <sys/mount.h> +#include <sys/msg.h> +#include <sys/mtio.h> +#include <sys/ptrace.h> +#include <sys/resource.h> +#include <sys/shm.h> +#include <sys/signal.h> +#include <sys/sockio.h> +#include <sys/stat.h> +#include <sys/statvfs.h> +#include <sys/time.h> +#include <sys/times.h> +#include <sys/types.h> +#include <sys/utsname.h> +#include <term.h> +#include <time.h> +#include <utime.h> +#include <utmp.h> +#include <wchar.h> + +// Include these after system headers to avoid name clashes and ambiguities. 
+#include "sanitizer_internal_defs.h" +#include "sanitizer_platform_limits_openbsd.h" + +namespace __sanitizer { +unsigned struct_utsname_sz = sizeof(struct utsname); +unsigned struct_stat_sz = sizeof(struct stat); +unsigned struct_rusage_sz = sizeof(struct rusage); +unsigned struct_tm_sz = sizeof(struct tm); +unsigned struct_passwd_sz = sizeof(struct passwd); +unsigned struct_group_sz = sizeof(struct group); +unsigned siginfo_t_sz = sizeof(siginfo_t); +unsigned struct_sigaction_sz = sizeof(struct sigaction); +unsigned struct_itimerval_sz = sizeof(struct itimerval); +unsigned pthread_t_sz = sizeof(pthread_t); +unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t); +unsigned pthread_cond_t_sz = sizeof(pthread_cond_t); +unsigned pid_t_sz = sizeof(pid_t); +unsigned timeval_sz = sizeof(timeval); +unsigned uid_t_sz = sizeof(uid_t); +unsigned gid_t_sz = sizeof(gid_t); +unsigned mbstate_t_sz = sizeof(mbstate_t); +unsigned sigset_t_sz = sizeof(sigset_t); +unsigned struct_timezone_sz = sizeof(struct timezone); +unsigned struct_tms_sz = sizeof(struct tms); +unsigned struct_sched_param_sz = sizeof(struct sched_param); +unsigned struct_sockaddr_sz = sizeof(struct sockaddr); +unsigned struct_rlimit_sz = sizeof(struct rlimit); +unsigned struct_timespec_sz = sizeof(struct timespec); +unsigned struct_utimbuf_sz = sizeof(struct utimbuf); +unsigned struct_itimerspec_sz = sizeof(struct itimerspec); +unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); +unsigned struct_statvfs_sz = sizeof(struct statvfs); + +const uptr sig_ign = (uptr)SIG_IGN; +const uptr sig_dfl = (uptr)SIG_DFL; +const uptr sig_err = (uptr)SIG_ERR; +const uptr sa_siginfo = (uptr)SA_SIGINFO; + +int shmctl_ipc_stat = (int)IPC_STAT; + +unsigned struct_utmp_sz = sizeof(struct utmp); + +int map_fixed = MAP_FIXED; + +int af_inet = (int)AF_INET; +int af_inet6 = (int)AF_INET6; + +uptr __sanitizer_in_addr_sz(int af) { + if (af == AF_INET) + return sizeof(struct in_addr); + else if (af == AF_INET6) + return sizeof(struct 
in6_addr); + else + return 0; +} + +unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); + +int glob_nomatch = GLOB_NOMATCH; +int glob_altdirfunc = GLOB_ALTDIRFUNC; + +unsigned path_max = PATH_MAX; + +const int si_SEGV_MAPERR = SEGV_MAPERR; +const int si_SEGV_ACCERR = SEGV_ACCERR; +} // namespace __sanitizer + +using namespace __sanitizer; + +COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t)); + +COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned)); +CHECK_TYPE_SIZE(pthread_key_t); + +CHECK_TYPE_SIZE(dl_phdr_info); +CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr); +CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name); +CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr); +CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum); + +CHECK_TYPE_SIZE(glob_t); +CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc); +CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv); +CHECK_SIZE_AND_OFFSET(glob_t, gl_offs); +CHECK_SIZE_AND_OFFSET(glob_t, gl_flags); +CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir); +CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir); +CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir); +CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat); +CHECK_SIZE_AND_OFFSET(glob_t, gl_stat); + +CHECK_TYPE_SIZE(addrinfo); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_family); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname); +CHECK_SIZE_AND_OFFSET(addrinfo, ai_next); + +CHECK_TYPE_SIZE(hostent); +CHECK_SIZE_AND_OFFSET(hostent, h_name); +CHECK_SIZE_AND_OFFSET(hostent, h_aliases); +CHECK_SIZE_AND_OFFSET(hostent, h_addrtype); +CHECK_SIZE_AND_OFFSET(hostent, h_length); +CHECK_SIZE_AND_OFFSET(hostent, h_addr_list); + +CHECK_TYPE_SIZE(iovec); +CHECK_SIZE_AND_OFFSET(iovec, iov_base); +CHECK_SIZE_AND_OFFSET(iovec, iov_len); + +CHECK_TYPE_SIZE(msghdr); +CHECK_SIZE_AND_OFFSET(msghdr, msg_name); 
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen); +CHECK_SIZE_AND_OFFSET(msghdr, msg_iov); +CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen); +CHECK_SIZE_AND_OFFSET(msghdr, msg_control); +CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen); +CHECK_SIZE_AND_OFFSET(msghdr, msg_flags); + +CHECK_TYPE_SIZE(cmsghdr); +CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len); +CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level); +CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type); + +COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent)); +CHECK_SIZE_AND_OFFSET(dirent, d_fileno); +CHECK_SIZE_AND_OFFSET(dirent, d_off); +CHECK_SIZE_AND_OFFSET(dirent, d_reclen); + +CHECK_TYPE_SIZE(ifconf); +CHECK_SIZE_AND_OFFSET(ifconf, ifc_len); +CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu); + +CHECK_TYPE_SIZE(pollfd); +CHECK_SIZE_AND_OFFSET(pollfd, fd); +CHECK_SIZE_AND_OFFSET(pollfd, events); +CHECK_SIZE_AND_OFFSET(pollfd, revents); + +CHECK_TYPE_SIZE(nfds_t); + +CHECK_TYPE_SIZE(sigset_t); + +COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction)); +// Can't write checks for sa_handler and sa_sigaction due to them being +// preprocessor macros. 
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask); + +CHECK_TYPE_SIZE(tm); +CHECK_SIZE_AND_OFFSET(tm, tm_sec); +CHECK_SIZE_AND_OFFSET(tm, tm_min); +CHECK_SIZE_AND_OFFSET(tm, tm_hour); +CHECK_SIZE_AND_OFFSET(tm, tm_mday); +CHECK_SIZE_AND_OFFSET(tm, tm_mon); +CHECK_SIZE_AND_OFFSET(tm, tm_year); +CHECK_SIZE_AND_OFFSET(tm, tm_wday); +CHECK_SIZE_AND_OFFSET(tm, tm_yday); +CHECK_SIZE_AND_OFFSET(tm, tm_isdst); +CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff); +CHECK_SIZE_AND_OFFSET(tm, tm_zone); + +CHECK_TYPE_SIZE(ipc_perm); +CHECK_SIZE_AND_OFFSET(ipc_perm, cuid); +CHECK_SIZE_AND_OFFSET(ipc_perm, cgid); +CHECK_SIZE_AND_OFFSET(ipc_perm, uid); +CHECK_SIZE_AND_OFFSET(ipc_perm, gid); +CHECK_SIZE_AND_OFFSET(ipc_perm, mode); +CHECK_SIZE_AND_OFFSET(ipc_perm, seq); +CHECK_SIZE_AND_OFFSET(ipc_perm, key); + +CHECK_TYPE_SIZE(shmid_ds); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime); +CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_atimensec); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime); +CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_dtimensec); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime); +CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_ctimensec); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid); +CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch); + +CHECK_TYPE_SIZE(clock_t); + +CHECK_TYPE_SIZE(ifaddrs); +CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next); +CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name); +CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr); +CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask); +// Compare against the union, because we can't reach into the union in a +// compliant way. 
+#ifdef ifa_dstaddr +#undef ifa_dstaddr +#endif +CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr); +CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data); + +CHECK_TYPE_SIZE(passwd); +CHECK_SIZE_AND_OFFSET(passwd, pw_name); +CHECK_SIZE_AND_OFFSET(passwd, pw_passwd); +CHECK_SIZE_AND_OFFSET(passwd, pw_uid); +CHECK_SIZE_AND_OFFSET(passwd, pw_gid); +CHECK_SIZE_AND_OFFSET(passwd, pw_dir); +CHECK_SIZE_AND_OFFSET(passwd, pw_shell); + +CHECK_SIZE_AND_OFFSET(passwd, pw_gecos); + +CHECK_TYPE_SIZE(group); +CHECK_SIZE_AND_OFFSET(group, gr_name); +CHECK_SIZE_AND_OFFSET(group, gr_passwd); +CHECK_SIZE_AND_OFFSET(group, gr_gid); +CHECK_SIZE_AND_OFFSET(group, gr_mem); + +#endif // SANITIZER_OPENBSD diff --git a/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h b/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h new file mode 100644 index 000000000000..d9899913e576 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h @@ -0,0 +1,382 @@ +//===-- sanitizer_platform_limits_openbsd.h -------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of Sanitizer common code. +// +// Sizes and layouts of platform-specific OpenBSD data structures. +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_PLATFORM_LIMITS_OPENBSD_H +#define SANITIZER_PLATFORM_LIMITS_OPENBSD_H + +#if SANITIZER_OPENBSD + +#include "sanitizer_internal_defs.h" +#include "sanitizer_platform.h" + +#define _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, shift) \ + ((link_map *)((handle) == nullptr ? 
nullptr : ((char *)(handle) + (shift)))) + +#if defined(__x86_64__) +#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ + _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 312) +#elif defined(__i386__) +#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ + _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 164) +#endif + +#define RLIMIT_AS RLIMIT_DATA + +namespace __sanitizer { +extern unsigned struct_utsname_sz; +extern unsigned struct_stat_sz; +extern unsigned struct_rusage_sz; +extern unsigned siginfo_t_sz; +extern unsigned struct_itimerval_sz; +extern unsigned pthread_t_sz; +extern unsigned pthread_mutex_t_sz; +extern unsigned pthread_cond_t_sz; +extern unsigned pid_t_sz; +extern unsigned timeval_sz; +extern unsigned uid_t_sz; +extern unsigned gid_t_sz; +extern unsigned mbstate_t_sz; +extern unsigned struct_timezone_sz; +extern unsigned struct_tms_sz; +extern unsigned struct_itimerspec_sz; +extern unsigned struct_sigevent_sz; +extern unsigned struct_statfs_sz; +extern unsigned struct_sockaddr_sz; + +extern unsigned struct_rlimit_sz; +extern unsigned struct_utimbuf_sz; +extern unsigned struct_timespec_sz; + +struct __sanitizer_iocb { + u64 aio_offset; + uptr aio_buf; + long aio_nbytes; + u32 aio_fildes; + u32 aio_lio_opcode; + long aio_reqprio; +#if SANITIZER_WORDSIZE == 64 + u8 aio_sigevent[32]; +#else + u8 aio_sigevent[20]; +#endif + u32 _state; + u32 _errno; + long _retval; +}; + +struct __sanitizer___sysctl_args { + int *name; + int nlen; + void *oldval; + uptr *oldlenp; + void *newval; + uptr newlen; +}; + +struct __sanitizer_sem_t { + uptr data[5]; +}; + +struct __sanitizer_ipc_perm { + u32 cuid; + u32 cgid; + u32 uid; + u32 gid; + u32 mode; + unsigned short seq; + long key; +}; + +struct __sanitizer_shmid_ds { + __sanitizer_ipc_perm shm_perm; + int shm_segsz; + u32 shm_lpid; + u32 shm_cpid; + short shm_nattch; + u64 shm_atime; + long __shm_atimensec; + u64 shm_dtime; + long __shm_dtimensec; + u64 shm_ctime; + long __shm_ctimensec; + void *_shm_internal; +}; + +extern unsigned 
struct_msqid_ds_sz; +extern unsigned struct_mq_attr_sz; +extern unsigned struct_timex_sz; +extern unsigned struct_statvfs_sz; + +struct __sanitizer_iovec { + void *iov_base; + uptr iov_len; +}; + +struct __sanitizer_ifaddrs { + struct __sanitizer_ifaddrs *ifa_next; + char *ifa_name; + unsigned int ifa_flags; + struct __sanitizer_sockaddr *ifa_addr; // (struct sockaddr *) + struct __sanitizer_sockaddr *ifa_netmask; // (struct sockaddr *) + struct __sanitizer_sockaddr *ifa_dstaddr; // (struct sockaddr *) + void *ifa_data; +}; + +typedef unsigned __sanitizer_pthread_key_t; + +typedef long long __sanitizer_time_t; +typedef int __sanitizer_suseconds_t; + +struct __sanitizer_timeval { + __sanitizer_time_t tv_sec; + __sanitizer_suseconds_t tv_usec; +}; + +struct __sanitizer_itimerval { + struct __sanitizer_timeval it_interval; + struct __sanitizer_timeval it_value; +}; + +struct __sanitizer_passwd { + char *pw_name; + char *pw_passwd; + int pw_uid; + int pw_gid; + __sanitizer_time_t pw_change; + char *pw_class; + char *pw_gecos; + char *pw_dir; + char *pw_shell; + __sanitizer_time_t pw_expire; +}; + +struct __sanitizer_group { + char *gr_name; + char *gr_passwd; + int gr_gid; + char **gr_mem; +}; + +struct __sanitizer_ether_addr { + u8 octet[6]; +}; + +struct __sanitizer_tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; + long int tm_gmtoff; + const char *tm_zone; +}; + +struct __sanitizer_msghdr { + void *msg_name; + unsigned msg_namelen; + struct __sanitizer_iovec *msg_iov; + unsigned msg_iovlen; + void *msg_control; + unsigned msg_controllen; + int msg_flags; +}; +struct __sanitizer_cmsghdr { + unsigned cmsg_len; + int cmsg_level; + int cmsg_type; +}; + +struct __sanitizer_dirent { + u64 d_fileno; + u64 d_off; + u16 d_reclen; +}; + +typedef u64 __sanitizer_clock_t; +typedef u32 __sanitizer_clockid_t; + +typedef u32 __sanitizer___kernel_uid_t; +typedef u32 __sanitizer___kernel_gid_t; 
+typedef u64 __sanitizer___kernel_off_t; +typedef struct { + u32 fds_bits[8]; +} __sanitizer___kernel_fd_set; + +typedef struct { + unsigned int pta_magic; + int pta_flags; + void *pta_private; +} __sanitizer_pthread_attr_t; + +typedef unsigned int __sanitizer_sigset_t; + +struct __sanitizer_siginfo { + // The size is determined by looking at sizeof of real siginfo_t on linux. + u64 opaque[128 / sizeof(u64)]; +}; + +using __sanitizer_sighandler_ptr = void (*)(int sig); +using __sanitizer_sigactionhandler_ptr = void (*)(int sig, + __sanitizer_siginfo *siginfo, + void *uctx); + +struct __sanitizer_sigaction { + union { + __sanitizer_sighandler_ptr handler; + __sanitizer_sigactionhandler_ptr sigaction; + }; + __sanitizer_sigset_t sa_mask; + int sa_flags; +}; + +typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t; + +struct __sanitizer_kernel_sigaction_t { + union { + void (*handler)(int signo); + void (*sigaction)(int signo, void *info, void *ctx); + }; + unsigned long sa_flags; + void (*sa_restorer)(void); + __sanitizer_kernel_sigset_t sa_mask; +}; + +extern const uptr sig_ign; +extern const uptr sig_dfl; +extern const uptr sig_err; +extern const uptr sa_siginfo; + +extern int af_inet; +extern int af_inet6; +uptr __sanitizer_in_addr_sz(int af); + +struct __sanitizer_dl_phdr_info { +#if SANITIZER_WORDSIZE == 64 + u64 dlpi_addr; +#else + u32 dlpi_addr; +#endif + const char *dlpi_name; + const void *dlpi_phdr; +#if SANITIZER_WORDSIZE == 64 + u32 dlpi_phnum; +#else + u16 dlpi_phnum; +#endif +}; + +extern unsigned struct_ElfW_Phdr_sz; + +struct __sanitizer_addrinfo { + int ai_flags; + int ai_family; + int ai_socktype; + int ai_protocol; + unsigned ai_addrlen; + struct __sanitizer_sockaddr *ai_addr; + char *ai_canonname; + struct __sanitizer_addrinfo *ai_next; +}; + +struct __sanitizer_hostent { + char *h_name; + char **h_aliases; + int h_addrtype; + int h_length; + char **h_addr_list; +}; + +struct __sanitizer_pollfd { + int fd; + short events; + short revents; +}; + 
+typedef unsigned __sanitizer_nfds_t; + +struct __sanitizer_glob_t { + int gl_pathc; + int gl_matchc; + int gl_offs; + int gl_flags; + char **gl_pathv; + void **gl_statv; + int (*gl_errfunc)(const char *, int); + void (*gl_closedir)(void *dirp); + struct dirent *(*gl_readdir)(void *dirp); + void *(*gl_opendir)(const char *); + int (*gl_lstat)(const char *, void * /* struct stat* */); + int (*gl_stat)(const char *, void * /* struct stat* */); +}; + +extern int glob_nomatch; +extern int glob_altdirfunc; + +extern unsigned path_max; + +typedef char __sanitizer_FILE; +#define SANITIZER_HAS_STRUCT_FILE 0 + +extern int shmctl_ipc_stat; + +// This simplifies generic code +#define struct_shminfo_sz -1 +#define struct_shm_info_sz -1 +#define shmctl_shm_stat -1 +#define shmctl_ipc_info -1 +#define shmctl_shm_info -1 + +extern unsigned struct_utmp_sz; +extern unsigned struct_utmpx_sz; + +extern int map_fixed; + +// ioctl arguments +struct __sanitizer_ifconf { + int ifc_len; + union { + void *ifcu_req; + } ifc_ifcu; +}; + +extern const int si_SEGV_MAPERR; +extern const int si_SEGV_ACCERR; +} // namespace __sanitizer + +#define CHECK_TYPE_SIZE(TYPE) \ + COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE)) + +#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \ + COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \ + sizeof(((CLASS *)NULL)->MEMBER)); \ + COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \ + offsetof(CLASS, MEMBER)) + +// For sigaction, which is a function and struct at the same time, +// and thus requires explicit "struct" in sizeof() expression. 
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \ + COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \ + sizeof(((struct CLASS *)NULL)->MEMBER)); \ + COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \ + offsetof(struct CLASS, MEMBER)) + +#define SIGACTION_SYMNAME __sigaction14 + +#endif // SANITIZER_OPENBSD + +#endif diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc index f12e8206abe6..c27055f2aa80 100644 --- a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc +++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc @@ -159,7 +159,6 @@ typedef struct user_fpregs elf_fpregset_t; # include <sys/procfs.h> #endif #include <sys/user.h> -#include <sys/ustat.h> #include <linux/cyclades.h> #include <linux/if_eql.h> #include <linux/if_plip.h> @@ -253,7 +252,19 @@ namespace __sanitizer { #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID - unsigned struct_ustat_sz = sizeof(struct ustat); + // Use pre-computed size of struct ustat to avoid <sys/ustat.h> which + // has been removed from glibc 2.28. 
+#if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \ + || defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \ + || defined(__x86_64__) +#define SIZEOF_STRUCT_USTAT 32 +#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \ + || defined(__powerpc__) || defined(__s390__) || defined(__sparc__) +#define SIZEOF_STRUCT_USTAT 20 +#else +#error Unknown size of struct ustat +#endif + unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT; unsigned struct_rlimit64_sz = sizeof(struct rlimit64); unsigned struct_statvfs64_sz = sizeof(struct statvfs64); #endif // SANITIZER_LINUX && !SANITIZER_ANDROID @@ -1026,6 +1037,12 @@ CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level); CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type); +#if SANITIZER_LINUX && (!defined(__ANDROID__) || __ANDROID_API__ >= 21) +CHECK_TYPE_SIZE(mmsghdr); +CHECK_SIZE_AND_OFFSET(mmsghdr, msg_hdr); +CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len); +#endif + COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent)); CHECK_SIZE_AND_OFFSET(dirent, d_ino); #if SANITIZER_MAC diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/lib/sanitizer_common/sanitizer_platform_limits_posix.h index b1901fb63edc..f89a11312d8b 100644 --- a/lib/sanitizer_common/sanitizer_platform_limits_posix.h +++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.h @@ -24,7 +24,7 @@ // FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that // incorporates the map structure. # define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ - ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544))) + ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 560))) // Get sys/_types.h, because that tells us whether 64-bit inodes are // used in struct dirent below. 
#include <sys/_types.h> @@ -414,6 +414,18 @@ namespace __sanitizer { typedef long __sanitizer_time_t; #endif + typedef long __sanitizer_suseconds_t; + + struct __sanitizer_timeval { + __sanitizer_time_t tv_sec; + __sanitizer_suseconds_t tv_usec; + }; + + struct __sanitizer_itimerval { + struct __sanitizer_timeval it_interval; + struct __sanitizer_timeval it_value; + }; + struct __sanitizer_timeb { __sanitizer_time_t time; unsigned short millitm; @@ -448,6 +460,12 @@ namespace __sanitizer { int mnt_freq; int mnt_passno; }; + + struct __sanitizer_file_handle { + unsigned int handle_bytes; + int handle_type; + unsigned char f_handle[1]; // variable sized + }; #endif #if SANITIZER_MAC || SANITIZER_FREEBSD @@ -482,6 +500,13 @@ namespace __sanitizer { }; #endif +#if SANITIZER_LINUX + struct __sanitizer_mmsghdr { + __sanitizer_msghdr msg_hdr; + unsigned int msg_len; + }; +#endif + #if SANITIZER_MAC struct __sanitizer_dirent { unsigned long long d_ino; diff --git a/lib/sanitizer_common/sanitizer_platform_limits_solaris.h b/lib/sanitizer_common/sanitizer_platform_limits_solaris.h index a9db71b99c81..c0aa4cc1b17e 100644 --- a/lib/sanitizer_common/sanitizer_platform_limits_solaris.h +++ b/lib/sanitizer_common/sanitizer_platform_limits_solaris.h @@ -160,6 +160,18 @@ struct __sanitizer_group { typedef long __sanitizer_time_t; +typedef long __sanitizer_suseconds_t; + +struct __sanitizer_timeval { + __sanitizer_time_t tv_sec; + __sanitizer_suseconds_t tv_usec; +}; + +struct __sanitizer_itimerval { + struct __sanitizer_timeval it_interval; + struct __sanitizer_timeval it_value; +}; + struct __sanitizer_timeb { __sanitizer_time_t time; unsigned short millitm; diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc index 1fad71f3582b..f7dfc86f58c6 100644 --- a/lib/sanitizer_common/sanitizer_posix.cc +++ b/lib/sanitizer_common/sanitizer_posix.cc @@ -21,7 +21,6 @@ #include "sanitizer_libc.h" #include "sanitizer_posix.h" #include 
"sanitizer_procmaps.h" -#include "sanitizer_stacktrace.h" #include <errno.h> #include <fcntl.h> @@ -153,11 +152,15 @@ bool MprotectReadOnly(uptr addr, uptr size) { return 0 == internal_mprotect((void *)addr, size, PROT_READ); } +#if !SANITIZER_MAC +void MprotectMallocZones(void *addr, int prot) {} +#endif + fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) { int flags; switch (mode) { case RdOnly: flags = O_RDONLY; break; - case WrOnly: flags = O_WRONLY | O_CREAT; break; + case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break; case RdWr: flags = O_RDWR | O_CREAT; break; } fd_t res = internal_open(filename, flags, 0660); diff --git a/lib/sanitizer_common/sanitizer_posix.h b/lib/sanitizer_common/sanitizer_posix.h index adef08248004..da447002b66c 100644 --- a/lib/sanitizer_common/sanitizer_posix.h +++ b/lib/sanitizer_common/sanitizer_posix.h @@ -17,6 +17,7 @@ // This header should NOT include any other headers from sanitizer runtime. #include "sanitizer_internal_defs.h" #include "sanitizer_platform_limits_netbsd.h" +#include "sanitizer_platform_limits_openbsd.h" #include "sanitizer_platform_limits_posix.h" #include "sanitizer_platform_limits_solaris.h" diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc index db41cad6c318..266e9bdba051 100644 --- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc @@ -19,12 +19,11 @@ #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_platform_limits_netbsd.h" +#include "sanitizer_platform_limits_openbsd.h" #include "sanitizer_platform_limits_posix.h" #include "sanitizer_platform_limits_solaris.h" #include "sanitizer_posix.h" #include "sanitizer_procmaps.h" -#include "sanitizer_stacktrace.h" -#include "sanitizer_symbolizer.h" #include <errno.h> #include <fcntl.h> @@ -42,7 +41,7 @@ #if SANITIZER_FREEBSD // The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even 
before // that, it was never implemented. So just define it to zero. -#undef MAP_NORESERVE +#undef MAP_NORESERVE #define MAP_NORESERVE 0 #endif @@ -70,16 +69,22 @@ void ReleaseMemoryPagesToOS(uptr beg, uptr end) { SANITIZER_MADVISE_DONTNEED); } -void NoHugePagesInRegion(uptr addr, uptr size) { +bool NoHugePagesInRegion(uptr addr, uptr size) { #ifdef MADV_NOHUGEPAGE // May not be defined on old systems. - madvise((void *)addr, size, MADV_NOHUGEPAGE); + return madvise((void *)addr, size, MADV_NOHUGEPAGE) == 0; +#else + return true; #endif // MADV_NOHUGEPAGE } -void DontDumpShadowMemory(uptr addr, uptr length) { -#ifdef MADV_DONTDUMP - madvise((void *)addr, length, MADV_DONTDUMP); -#endif +bool DontDumpShadowMemory(uptr addr, uptr length) { +#if defined(MADV_DONTDUMP) + return madvise((void *)addr, length, MADV_DONTDUMP) == 0; +#elif defined(MADV_NOCORE) + return madvise((void *)addr, length, MADV_NOCORE) == 0; +#else + return true; +#endif // MADV_DONTDUMP } static rlim_t getlim(int res) { @@ -218,6 +223,7 @@ void InstallDeadlySignalHandlers(SignalHandlerType handler) { MaybeInstallSigaction(SIGABRT, handler); MaybeInstallSigaction(SIGFPE, handler); MaybeInstallSigaction(SIGILL, handler); + MaybeInstallSigaction(SIGTRAP, handler); } bool SignalContext::IsStackOverflow() const { @@ -230,7 +236,9 @@ bool SignalContext::IsStackOverflow() const { // take it into account. bool IsStackAccess = addr >= (sp & ~0xFFF) && addr < sp + 0xFFFF; #else - bool IsStackAccess = addr + 512 > sp && addr < sp + 0xFFFF; + // Let's accept up to a page size away from top of stack. Things like stack + // probing can trigger accesses with such large offsets. 
+ bool IsStackAccess = addr + GetPageSizeCached() > sp && addr < sp + 0xFFFF; #endif #if __powerpc__ @@ -290,16 +298,12 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) { return result; } -void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) { +void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) { // Some kinds of sandboxes may forbid filesystem access, so we won't be able // to read the file mappings from /proc/self/maps. Luckily, neither the // process will be able to load additional libraries, so it's fine to use the // cached mappings. MemoryMappingLayout::CacheMemoryMappings(); - // Same for /proc/self/exe in the symbolizer. -#if !SANITIZER_GO - Symbolizer::GetOrInit()->PrepareForSandboxing(); -#endif } #if SANITIZER_ANDROID || SANITIZER_GO @@ -324,7 +328,7 @@ int GetNamedMappingFd(const char *name, uptr size) { } #endif -void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { +bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { int fd = name ? GetNamedMappingFd(name, size) : -1; unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE; if (fd == -1) flags |= MAP_ANON; @@ -334,12 +338,14 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE, flags, fd, 0); int reserrno; - if (internal_iserror(p, &reserrno)) + if (internal_iserror(p, &reserrno)) { Report("ERROR: %s failed to " "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n", SanitizerToolName, size, size, fixed_addr, reserrno); + return false; + } IncreaseTotalMmap(size); - return (void *)p; + return true; } uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) { @@ -348,11 +354,7 @@ uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) { // `open` (e.g. TSAN, ESAN), then you'll get a failure during initialization. // TODO(flowerhack): Fix the implementation of GetNamedMappingFd to solve // this problem. 
- if (fixed_addr) { - base_ = MmapFixedNoAccess(fixed_addr, size); - } else { - base_ = MmapNoAccess(size); - } + base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size); size_ = size; name_ = name; (void)os_handle_; // unsupported @@ -370,16 +372,14 @@ uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size) { } void ReservedAddressRange::Unmap(uptr addr, uptr size) { - void* addr_as_void = reinterpret_cast<void*>(addr); - uptr base_as_uptr = reinterpret_cast<uptr>(base_); - // Only unmap at the beginning or end of the range. - CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_)); CHECK_LE(size, size_); + if (addr == reinterpret_cast<uptr>(base_)) + // If we unmap the whole range, just null out the base. + base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size); + else + CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_); + size_ -= size; UnmapOrDie(reinterpret_cast<void*>(addr), size); - if (addr_as_void == base_) { - base_ = reinterpret_cast<void*>(addr + size); - } - size_ = size_ - size; } void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) { diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc index 5c2360043b07..4315f6516c03 100644 --- a/lib/sanitizer_common/sanitizer_printf.cc +++ b/lib/sanitizer_common/sanitizer_printf.cc @@ -95,22 +95,31 @@ static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num, false /* uppercase */); } -static int AppendString(char **buff, const char *buff_end, int precision, - const char *s) { + +// Use the fact that explicitly requesting 0 width (%0s) results in UB and +// interpret width == 0 as "no width requested": +// width == 0 - no width requested +// width < 0 - left-justify s within and pad it to -width chars, if necessary +// width > 0 - right-justify s, not implemented yet +static int AppendString(char **buff, const char *buff_end, int width, + int max_chars, const char 
*s) { if (!s) s = "<null>"; int result = 0; for (; *s; s++) { - if (precision >= 0 && result >= precision) + if (max_chars >= 0 && result >= max_chars) break; result += AppendChar(buff, buff_end, *s); } + // Only the left justified strings are supported. + while (width < -result) + result += AppendChar(buff, buff_end, ' '); return result; } static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) { int result = 0; - result += AppendString(buff, buff_end, -1, "0x"); + result += AppendString(buff, buff_end, 0, -1, "0x"); result += AppendUnsigned(buff, buff_end, ptr_value, 16, SANITIZER_POINTER_FORMAT_LENGTH, true /* pad_with_zero */, false /* uppercase */); @@ -120,8 +129,8 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) { int VSNPrintf(char *buff, int buff_length, const char *format, va_list args) { static const char *kPrintfFormatsHelp = - "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; %(\\.\\*)?s; " - "%c\n"; + "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; " + "%[-]([0-9]*)?(\\.\\*)?s; %c\n"; RAW_CHECK(format); RAW_CHECK(buff_length > 0); const char *buff_end = &buff[buff_length - 1]; @@ -133,6 +142,9 @@ int VSNPrintf(char *buff, int buff_length, continue; } cur++; + bool left_justified = *cur == '-'; + if (left_justified) + cur++; bool have_width = (*cur >= '0' && *cur <= '9'); bool pad_with_zero = (*cur == '0'); int width = 0; @@ -153,9 +165,10 @@ int VSNPrintf(char *buff, int buff_length, cur += have_ll * 2; s64 dval; u64 uval; - bool have_flags = have_width | have_z | have_ll; - // Only %s supports precision for now - CHECK(!(precision >= 0 && *cur != 's')); + const bool have_length = have_z || have_ll; + const bool have_flags = have_width || have_length; + // At the moment only %s supports precision and left-justification. + CHECK(!((precision >= 0 || left_justified) && *cur != 's')); switch (*cur) { case 'd': { dval = have_ll ? 
va_arg(args, s64) @@ -182,8 +195,11 @@ int VSNPrintf(char *buff, int buff_length, break; } case 's': { - RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp); - result += AppendString(&buff, buff_end, precision, va_arg(args, char*)); + RAW_CHECK_MSG(!have_length, kPrintfFormatsHelp); + // Only left-justified width is supported. + CHECK(!have_width || left_justified); + result += AppendString(&buff, buff_end, left_justified ? -width : width, + precision, va_arg(args, char*)); break; } case 'c': { diff --git a/lib/sanitizer_common/sanitizer_procmaps.h b/lib/sanitizer_common/sanitizer_procmaps.h index ea2cb7a5c68b..9fde040a11a6 100644 --- a/lib/sanitizer_common/sanitizer_procmaps.h +++ b/lib/sanitizer_common/sanitizer_procmaps.h @@ -16,8 +16,8 @@ #include "sanitizer_platform.h" -#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ - SANITIZER_MAC || SANITIZER_SOLARIS +#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_SOLARIS #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" @@ -95,6 +95,5 @@ uptr ParseHex(const char **p); } // namespace __sanitizer -#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || - // SANITIZER_MAC || SANITIZER_SOLARIS +#endif #endif // SANITIZER_PROCMAPS_H diff --git a/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc b/lib/sanitizer_common/sanitizer_procmaps_bsd.cc index c26a6d47ea75..e41dc987dcd7 100644 --- a/lib/sanitizer_common/sanitizer_procmaps_freebsd.cc +++ b/lib/sanitizer_common/sanitizer_procmaps_bsd.cc @@ -1,4 +1,4 @@ -//===-- sanitizer_procmaps_freebsd.cc -------------------------------------===// +//===-- sanitizer_procmaps_bsd.cc -----------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -7,29 +7,40 @@ // //===----------------------------------------------------------------------===// // -// Information about the process mappings (FreeBSD and NetBSD-specific parts). 
+// Information about the process mappings +// (FreeBSD, OpenBSD and NetBSD-specific parts). //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_NETBSD +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD #include "sanitizer_common.h" #if SANITIZER_FREEBSD #include "sanitizer_freebsd.h" #endif #include "sanitizer_procmaps.h" -#include <unistd.h> +// clang-format off +#include <sys/types.h> #include <sys/sysctl.h> +// clang-format on +#include <unistd.h> #if SANITIZER_FREEBSD #include <sys/user.h> #endif +#include <limits.h> +#if SANITIZER_OPENBSD +#define KVME_PROT_READ KVE_PROT_READ +#define KVME_PROT_WRITE KVE_PROT_WRITE +#define KVME_PROT_EXEC KVE_PROT_EXEC +#endif + // Fix 'kinfo_vmentry' definition on FreeBSD prior v9.2 in 32-bit mode. #if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) -# include <osreldate.h> -# if __FreeBSD_version <= 902001 // v9.2 -# define kinfo_vmentry xkinfo_vmentry -# endif +#include <osreldate.h> +#if __FreeBSD_version <= 902001 // v9.2 +#define kinfo_vmentry xkinfo_vmentry +#endif #endif namespace __sanitizer { @@ -41,12 +52,18 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { KERN_PROC, KERN_PROC_VMMAP, getpid() -#else +#elif SANITIZER_OPENBSD + CTL_KERN, + KERN_PROC_VMMAP, + getpid() +#elif SANITIZER_NETBSD CTL_VM, VM_PROC, VM_PROC_MAP, getpid(), sizeof(struct kinfo_vmentry) +#else +#error "not supported" #endif }; @@ -55,21 +72,38 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { CHECK_EQ(Err, 0); CHECK_GT(Size, 0); +#if !SANITIZER_OPENBSD size_t MmapedSize = Size * 4 / 3; void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()"); Size = MmapedSize; Err = sysctl(Mib, ARRAY_SIZE(Mib), VmMap, &Size, NULL, 0); CHECK_EQ(Err, 0); + proc_maps->data = (char *)VmMap; +#else + size_t PageSize = GetPageSize(); + size_t MmapedSize = Size; + MmapedSize = ((MmapedSize - 1) / PageSize + 1) * PageSize; + char *Mem = (char 
*)MmapOrDie(MmapedSize, "ReadProcMaps()"); + Size = 2 * Size + 10 * sizeof(struct kinfo_vmentry); + if (Size > 0x10000) + Size = 0x10000; + Size = (Size / sizeof(struct kinfo_vmentry)) * sizeof(struct kinfo_vmentry); + Err = sysctl(Mib, ARRAY_SIZE(Mib), Mem, &Size, NULL, 0); + CHECK_EQ(Err, 0); + MmapedSize = Size; + proc_maps->data = Mem; +#endif - proc_maps->data = (char*)VmMap; proc_maps->mmaped_size = MmapedSize; proc_maps->len = Size; } bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { char *last = data_.proc_self_maps.data + data_.proc_self_maps.len; - if (data_.current >= last) return false; - struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry *)data_.current; + if (data_.current >= last) + return false; + const struct kinfo_vmentry *VmEntry = + (const struct kinfo_vmentry *)data_.current; segment->start = (uptr)VmEntry->kve_start; segment->end = (uptr)VmEntry->kve_end; @@ -83,11 +117,13 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0) segment->protection |= kProtectionExecute; +#if !SANITIZER_OPENBSD if (segment->filename != NULL && segment->filename_size > 0) { internal_snprintf(segment->filename, Min(segment->filename_size, (uptr)PATH_MAX), "%s", VmEntry->kve_path); } +#endif #if SANITIZER_FREEBSD data_.current += VmEntry->kve_structsize; @@ -98,6 +134,6 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) { return true; } -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_NETBSD +#endif diff --git a/lib/sanitizer_common/sanitizer_procmaps_common.cc b/lib/sanitizer_common/sanitizer_procmaps_common.cc index 0cd3e2458d87..1f2b431c7ccd 100644 --- a/lib/sanitizer_common/sanitizer_procmaps_common.cc +++ b/lib/sanitizer_common/sanitizer_procmaps_common.cc @@ -12,8 +12,8 @@ #include "sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \ - SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || 
SANITIZER_LINUX || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_SOLARIS #include "sanitizer_common.h" #include "sanitizer_placement_new.h" @@ -170,5 +170,4 @@ void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { } // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || - // SANITIZER_SOLARIS +#endif diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h index 886445a27322..d4aa12fbebce 100644 --- a/lib/sanitizer_common/sanitizer_quarantine.h +++ b/lib/sanitizer_common/sanitizer_quarantine.h @@ -90,6 +90,9 @@ class Quarantine { atomic_store_relaxed(&max_size_, size); atomic_store_relaxed(&min_size_, size / 10 * 9); // 90% of max size. atomic_store_relaxed(&max_cache_size_, cache_size); + + cache_mutex_.Init(); + recycle_mutex_.Init(); } uptr GetSize() const { return atomic_load_relaxed(&max_size_); } @@ -142,8 +145,8 @@ class Quarantine { atomic_uintptr_t min_size_; atomic_uintptr_t max_cache_size_; char pad1_[kCacheLineSize]; - SpinMutex cache_mutex_; - SpinMutex recycle_mutex_; + StaticSpinMutex cache_mutex_; + StaticSpinMutex recycle_mutex_; Cache cache_; char pad2_[kCacheLineSize]; diff --git a/lib/sanitizer_common/sanitizer_report_decorator.h b/lib/sanitizer_common/sanitizer_report_decorator.h index 060b58d3f2d3..a7878684a60d 100644 --- a/lib/sanitizer_common/sanitizer_report_decorator.h +++ b/lib/sanitizer_common/sanitizer_report_decorator.h @@ -25,10 +25,11 @@ class SanitizerCommonDecorator { // stdout, which is not the case on Windows (see SetConsoleTextAttribute()). public: SanitizerCommonDecorator() : ansi_(ColorizeReports()) {} - const char *Bold() const { return ansi_ ? "\033[1m" : ""; } + const char *Bold() const { return ansi_ ? "\033[1m" : ""; } const char *Default() const { return ansi_ ? 
"\033[1m\033[0m" : ""; } const char *Warning() const { return Red(); } - const char *MemoryByte() { return Magenta(); } + const char *Error() const { return Red(); } + const char *MemoryByte() const { return Magenta(); } protected: const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; } diff --git a/lib/sanitizer_common/sanitizer_rtems.cc b/lib/sanitizer_common/sanitizer_rtems.cc new file mode 100644 index 000000000000..4be367911e3f --- /dev/null +++ b/lib/sanitizer_common/sanitizer_rtems.cc @@ -0,0 +1,282 @@ +//===-- sanitizer_rtems.cc ------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries and +// implements RTEMS-specific functions. +//===----------------------------------------------------------------------===// + +#include "sanitizer_rtems.h" +#if SANITIZER_RTEMS + +#define posix_memalign __real_posix_memalign +#define free __real_free +#define memset __real_memset + +#include "sanitizer_file.h" +#include "sanitizer_symbolizer.h" +#include <errno.h> +#include <fcntl.h> +#include <pthread.h> +#include <sched.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +// There is no mmap on RTEMS. Use memalign, etc. 
+#define __mmap_alloc_aligned posix_memalign +#define __mmap_free free +#define __mmap_memset memset + +namespace __sanitizer { + +#include "sanitizer_syscall_generic.inc" + +void NORETURN internal__exit(int exitcode) { + _exit(exitcode); +} + +uptr internal_sched_yield() { + return sched_yield(); +} + +uptr internal_getpid() { + return getpid(); +} + +bool FileExists(const char *filename) { + struct stat st; + if (stat(filename, &st)) + return false; + // Sanity check: filename is a regular file. + return S_ISREG(st.st_mode); +} + +uptr GetThreadSelf() { return static_cast<uptr>(pthread_self()); } + +tid_t GetTid() { return GetThreadSelf(); } + +void Abort() { abort(); } + +int Atexit(void (*function)(void)) { return atexit(function); } + +void SleepForSeconds(int seconds) { sleep(seconds); } + +void SleepForMillis(int millis) { usleep(millis * 1000); } + +bool SupportsColoredOutput(fd_t fd) { return false; } + +void GetThreadStackTopAndBottom(bool at_initialization, + uptr *stack_top, uptr *stack_bottom) { + pthread_attr_t attr; + pthread_attr_init(&attr); + CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); + void *base = nullptr; + size_t size = 0; + CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0); + CHECK_EQ(pthread_attr_destroy(&attr), 0); + + *stack_bottom = reinterpret_cast<uptr>(base); + *stack_top = *stack_bottom + size; +} + +void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, + uptr *tls_addr, uptr *tls_size) { + uptr stack_top, stack_bottom; + GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom); + *stk_addr = stack_bottom; + *stk_size = stack_top - stack_bottom; + *tls_addr = *tls_size = 0; +} + +void MaybeReexec() {} +void CheckASLR() {} +void DisableCoreDumperIfNecessary() {} +void InstallDeadlySignalHandlers(SignalHandlerType handler) {} +void SetAlternateSignalStack() {} +void UnsetAlternateSignalStack() {} +void InitTlsSize() {} + +void PrintModuleMap() {} + +void SignalContext::DumpAllRegisters(void *context) 
{} +const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); } + +enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 }; + +BlockingMutex::BlockingMutex() { + internal_memset(this, 0, sizeof(*this)); +} + +void BlockingMutex::Lock() { + CHECK_EQ(owner_, 0); + atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); + if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked) + return; + while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) { + internal_sched_yield(); + } +} + +void BlockingMutex::Unlock() { + atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); + u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release); + CHECK_NE(v, MtxUnlocked); +} + +void BlockingMutex::CheckLocked() { + atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); + CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed)); +} + +uptr GetPageSize() { return getpagesize(); } + +uptr GetMmapGranularity() { return GetPageSize(); } + +uptr GetMaxVirtualAddress() { + return (1ULL << 32) - 1; // 0xffffffff +} + +void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) { + void* ptr = 0; + int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size); + if (UNLIKELY(res)) + ReportMmapFailureAndDie(size, mem_type, "allocate", res, raw_report); + __mmap_memset(ptr, 0, size); + IncreaseTotalMmap(size); + return ptr; +} + +void *MmapOrDieOnFatalError(uptr size, const char *mem_type) { + void* ptr = 0; + int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size); + if (UNLIKELY(res)) { + if (res == ENOMEM) + return nullptr; + ReportMmapFailureAndDie(size, mem_type, "allocate", false); + } + __mmap_memset(ptr, 0, size); + IncreaseTotalMmap(size); + return ptr; +} + +void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, + const char *mem_type) { + CHECK(IsPowerOfTwo(size)); + CHECK(IsPowerOfTwo(alignment)); + void* ptr = 0; + int res = 
__mmap_alloc_aligned(&ptr, alignment, size); + if (res) + ReportMmapFailureAndDie(size, mem_type, "align allocate", res, false); + __mmap_memset(ptr, 0, size); + IncreaseTotalMmap(size); + return ptr; +} + +void *MmapNoReserveOrDie(uptr size, const char *mem_type) { + return MmapOrDie(size, mem_type, false); +} + +void UnmapOrDie(void *addr, uptr size) { + if (!addr || !size) return; + __mmap_free(addr); + DecreaseTotalMmap(size); +} + +fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) { + int flags; + switch (mode) { + case RdOnly: flags = O_RDONLY; break; + case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break; + case RdWr: flags = O_RDWR | O_CREAT; break; + } + fd_t res = open(filename, flags, 0660); + if (internal_iserror(res, errno_p)) + return kInvalidFd; + return res; +} + +void CloseFile(fd_t fd) { + close(fd); +} + +bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, + error_t *error_p) { + uptr res = read(fd, buff, buff_size); + if (internal_iserror(res, error_p)) + return false; + if (bytes_read) + *bytes_read = res; + return true; +} + +bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, + error_t *error_p) { + uptr res = write(fd, buff, buff_size); + if (internal_iserror(res, error_p)) + return false; + if (bytes_written) + *bytes_written = res; + return true; +} + +bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) { + uptr res = rename(oldpath, newpath); + return !internal_iserror(res, error_p); +} + +void ReleaseMemoryPagesToOS(uptr beg, uptr end) {} +void DumpProcessMap() {} + +// There is no page protection so everything is "accessible." 
+bool IsAccessibleMemoryRange(uptr beg, uptr size) { + return true; +} + +char **GetArgv() { return nullptr; } + +const char *GetEnv(const char *name) { + return getenv(name); +} + +uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { + internal_strncpy(buf, "StubBinaryName", buf_len); + return internal_strlen(buf); +} + +uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) { + internal_strncpy(buf, "StubProcessName", buf_len); + return internal_strlen(buf); +} + +bool IsPathSeparator(const char c) { + return c == '/'; +} + +bool IsAbsolutePath(const char *path) { + return path != nullptr && IsPathSeparator(path[0]); +} + +void ReportFile::Write(const char *buffer, uptr length) { + SpinMutexLock l(mu); + static const char *kWriteError = + "ReportFile::Write() can't output requested buffer!\n"; + ReopenIfNecessary(); + if (length != write(fd, buffer, length)) { + write(fd, kWriteError, internal_strlen(kWriteError)); + Die(); + } +} + +uptr MainThreadStackBase, MainThreadStackSize; +uptr MainThreadTlsBase, MainThreadTlsSize; + +} // namespace __sanitizer + +#endif // SANITIZER_RTEMS diff --git a/lib/sanitizer_common/sanitizer_rtems.h b/lib/sanitizer_common/sanitizer_rtems.h new file mode 100644 index 000000000000..968fa66e1be7 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_rtems.h @@ -0,0 +1,21 @@ +//===-- sanitizer_rtems.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries and +// provides definitions for RTEMS-specific functions. 
+//===----------------------------------------------------------------------===// +#ifndef SANITIZER_RTEMS_H +#define SANITIZER_RTEMS_H + +#include "sanitizer_platform.h" +#if SANITIZER_RTEMS +#include "sanitizer_common.h" + +#endif // SANITIZER_RTEMS +#endif // SANITIZER_RTEMS_H diff --git a/lib/sanitizer_common/sanitizer_stackdepot.cc b/lib/sanitizer_common/sanitizer_stackdepot.cc index 214dda56df34..3bd5b677a1f8 100644 --- a/lib/sanitizer_common/sanitizer_stackdepot.cc +++ b/lib/sanitizer_common/sanitizer_stackdepot.cc @@ -135,8 +135,8 @@ bool StackDepotReverseMap::IdDescPair::IdComparator( return a.id < b.id; } -StackDepotReverseMap::StackDepotReverseMap() - : map_(StackDepotGetStats()->n_uniq_ids + 100) { +StackDepotReverseMap::StackDepotReverseMap() { + map_.reserve(StackDepotGetStats()->n_uniq_ids + 100); for (int idx = 0; idx < StackDepot::kTabSize; idx++) { atomic_uintptr_t *p = &theDepot.tab[idx]; uptr v = atomic_load(p, memory_order_consume); @@ -146,7 +146,7 @@ StackDepotReverseMap::StackDepotReverseMap() map_.push_back(pair); } } - InternalSort(&map_, map_.size(), IdDescPair::IdComparator); + Sort(map_.data(), map_.size(), &IdDescPair::IdComparator); } StackTrace StackDepotReverseMap::Get(u32 id) { diff --git a/lib/sanitizer_common/sanitizer_stacktrace.cc b/lib/sanitizer_common/sanitizer_stacktrace.cc index 2741dde7a3f3..21976b6b1531 100644 --- a/lib/sanitizer_common/sanitizer_stacktrace.cc +++ b/lib/sanitizer_common/sanitizer_stacktrace.cc @@ -20,7 +20,8 @@ namespace __sanitizer { uptr StackTrace::GetNextInstructionPc(uptr pc) { #if defined(__mips__) return pc + 8; -#elif defined(__powerpc__) +#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \ + defined(__aarch64__) return pc + 4; #else return pc + 1; @@ -40,6 +41,9 @@ void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) { top_frame_bp = 0; } +// Sparc implemention is in its own file. 
+#if !defined(__sparc__) + // In GCC on ARM bp points to saved lr, not fp, so we should check the next // cell in stack to be a saved frame pointer. GetCanonicFrame returns the // pointer to saved frame pointer in any case. @@ -106,6 +110,8 @@ void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top, } } +#endif // !defined(__sparc__) + void BufferedStackTrace::PopStackFrames(uptr count) { CHECK_LT(count, size); size -= count; diff --git a/lib/sanitizer_common/sanitizer_stacktrace.h b/lib/sanitizer_common/sanitizer_stacktrace.h index f15196e4b725..562d2e9f737c 100644 --- a/lib/sanitizer_common/sanitizer_stacktrace.h +++ b/lib/sanitizer_common/sanitizer_stacktrace.h @@ -23,6 +23,8 @@ static const u32 kStackTraceMax = 256; # define SANITIZER_CAN_FAST_UNWIND 0 #elif SANITIZER_WINDOWS # define SANITIZER_CAN_FAST_UNWIND 0 +#elif SANITIZER_OPENBSD +# define SANITIZER_CAN_FAST_UNWIND 0 #else # define SANITIZER_CAN_FAST_UNWIND 1 #endif @@ -30,7 +32,7 @@ static const u32 kStackTraceMax = 256; // Fast unwind is the only option on Mac for now; we will need to // revisit this macro when slow unwind works on Mac, see // https://github.com/google/sanitizers/issues/137 -#if SANITIZER_MAC +#if SANITIZER_MAC || SANITIZER_OPENBSD || SANITIZER_RTEMS # define SANITIZER_CAN_SLOW_UNWIND 0 #else # define SANITIZER_CAN_SLOW_UNWIND 1 @@ -73,10 +75,11 @@ struct StackTrace { ALWAYS_INLINE uptr StackTrace::GetPreviousInstructionPc(uptr pc) { #if defined(__arm__) - // Cancel Thumb bit. - pc = pc & (~1); -#endif -#if defined(__powerpc__) || defined(__powerpc64__) + // T32 (Thumb) branch instructions might be 16 or 32 bit long, + // so we return (pc-2) in that case in order to be safe. + // For A32 mode we return (pc-4) because all instructions are 32 bit long. + return (pc - 3) & (~1); +#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__) // PCs are always 4 byte aligned. 
return pc - 4; #elif defined(__sparc__) || defined(__mips__) diff --git a/lib/sanitizer_common/sanitizer_stacktrace_printer.cc b/lib/sanitizer_common/sanitizer_stacktrace_printer.cc index a271302708be..ac0731d46a6b 100644 --- a/lib/sanitizer_common/sanitizer_stacktrace_printer.cc +++ b/lib/sanitizer_common/sanitizer_stacktrace_printer.cc @@ -17,8 +17,8 @@ namespace __sanitizer { -// sanitizer_symbolizer_fuchsia.cc implements these differently for Fuchsia. -#if !SANITIZER_FUCHSIA +// sanitizer_symbolizer_markup.cc implements these differently. +#if !SANITIZER_SYMBOLIZER_MARKUP static const char *StripFunctionName(const char *function, const char *prefix) { if (!function) return nullptr; @@ -228,7 +228,7 @@ void RenderData(InternalScopedString *buffer, const char *format, } } -#endif // !SANITIZER_FUCHSIA +#endif // !SANITIZER_SYMBOLIZER_MARKUP void RenderSourceLocation(InternalScopedString *buffer, const char *file, int line, int column, bool vs_style, diff --git a/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc b/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc new file mode 100644 index 000000000000..9f9920ece80b --- /dev/null +++ b/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc @@ -0,0 +1,58 @@ +//===-- sanitizer_stacktrace_sparc.cc -------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries. +// +// Implemention of fast stack unwinding for Sparc. +//===----------------------------------------------------------------------===// + +// This file is ported to Sparc v8, but it should be easy to port to +// Sparc v9. 
+#if defined(__sparcv8__) + +#include "sanitizer_common.h" +#include "sanitizer_stacktrace.h" + +namespace __sanitizer { + +void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top, + uptr stack_bottom, u32 max_depth) { + const uptr kPageSize = GetPageSizeCached(); + CHECK_GE(max_depth, 2); + trace_buffer[0] = pc; + size = 1; + if (stack_top < 4096) return; // Sanity check for stack top. + // Flush register windows to memory + asm volatile("ta 3" ::: "memory"); + uhwptr *frame = (uhwptr*)bp; + // Lowest possible address that makes sense as the next frame pointer. + // Goes up as we walk the stack. + uptr bottom = stack_bottom; + // Avoid infinite loop when frame == frame[0] by using frame > prev_frame. + while (IsValidFrame((uptr)frame, stack_top, bottom) && + IsAligned((uptr)frame, sizeof(*frame)) && + size < max_depth) { + uhwptr pc1 = frame[15]; + // Let's assume that any pointer in the 0th page is invalid and + // stop unwinding here. If we're adding support for a platform + // where this isn't true, we need to reconsider this check. 
+ if (pc1 < kPageSize) + break; + if (pc1 != pc) { + trace_buffer[size++] = (uptr) pc1; + } + bottom = (uptr)frame; + frame = (uhwptr*)frame[14]; + } +} + +} // namespace __sanitizer + +#endif // !defined(__sparcv8__) diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc index 0f543d2d7c87..fe4fe86daedf 100644 --- a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc @@ -88,7 +88,7 @@ namespace __sanitizer { class SuspendedThreadsListLinux : public SuspendedThreadsList { public: - SuspendedThreadsListLinux() : thread_ids_(1024) {} + SuspendedThreadsListLinux() { thread_ids_.reserve(1024); } tid_t GetThreadID(uptr index) const; uptr ThreadCount() const; @@ -209,26 +209,26 @@ void ThreadSuspender::KillAllThreads() { bool ThreadSuspender::SuspendAllThreads() { ThreadLister thread_lister(pid_); - bool added_threads; - bool first_iteration = true; - do { - // Run through the directory entries once. - added_threads = false; - pid_t tid = thread_lister.GetNextTID(); - while (tid >= 0) { - if (SuspendThread(tid)) - added_threads = true; - tid = thread_lister.GetNextTID(); - } - if (thread_lister.error() || (first_iteration && !added_threads)) { - // Detach threads and fail. 
- ResumeAllThreads(); - return false; + bool retry = true; + InternalMmapVector<tid_t> threads; + threads.reserve(128); + for (int i = 0; i < 30 && retry; ++i) { + retry = false; + switch (thread_lister.ListThreads(&threads)) { + case ThreadLister::Error: + ResumeAllThreads(); + return false; + case ThreadLister::Incomplete: + retry = true; + break; + case ThreadLister::Ok: + break; } - thread_lister.Reset(); - first_iteration = false; - } while (added_threads); - return true; + for (tid_t tid : threads) + if (SuspendThread(tid)) + retry = true; + }; + return suspended_threads_list_.ThreadCount(); } // Pointer to the ThreadSuspender instance for use in signal handler. @@ -295,7 +295,7 @@ static int TracerThread(void* argument) { thread_suspender_instance = &thread_suspender; // Alternate stack for signal handling. - InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize); + InternalMmapVector<char> handler_stack_memory(kHandlerStackSize); stack_t handler_stack; internal_memset(&handler_stack, 0, sizeof(handler_stack)); handler_stack.ss_sp = handler_stack_memory.data(); diff --git a/lib/sanitizer_common/sanitizer_suppressions.cc b/lib/sanitizer_common/sanitizer_suppressions.cc index 89ddfddd1108..232171ed5b67 100644 --- a/lib/sanitizer_common/sanitizer_suppressions.cc +++ b/lib/sanitizer_common/sanitizer_suppressions.cc @@ -25,7 +25,7 @@ namespace __sanitizer { SuppressionContext::SuppressionContext(const char *suppression_types[], int suppression_types_num) : suppression_types_(suppression_types), - suppression_types_num_(suppression_types_num), suppressions_(1), + suppression_types_num_(suppression_types_num), can_parse_(true) { CHECK_LE(suppression_types_num_, kMaxSuppressionTypes); internal_memset(has_suppression_type_, 0, suppression_types_num_); diff --git a/lib/sanitizer_common/sanitizer_symbolizer.h b/lib/sanitizer_common/sanitizer_symbolizer.h index 208362d2f478..e08eb0d660d5 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer.h +++ 
b/lib/sanitizer_common/sanitizer_symbolizer.h @@ -107,7 +107,6 @@ class Symbolizer final { void Flush(); // Attempts to demangle the provided C++ mangled name. const char *Demangle(const char *name); - void PrepareForSandboxing(); // Allow user to install hooks that would be called before/after Symbolizer // does the actual file/line info fetching. Specific sanitizers may need this @@ -133,8 +132,9 @@ class Symbolizer final { class ModuleNameOwner { public: explicit ModuleNameOwner(BlockingMutex *synchronized_by) - : storage_(kInitialCapacity), last_match_(nullptr), - mu_(synchronized_by) {} + : last_match_(nullptr), mu_(synchronized_by) { + storage_.reserve(kInitialCapacity); + } const char *GetOwnedCopy(const char *str); private: @@ -158,7 +158,6 @@ class Symbolizer final { // Platform-specific default demangler, must not return nullptr. const char *PlatformDemangle(const char *name); - void PlatformPrepareForSandboxing(); static Symbolizer *symbolizer_; static StaticSpinMutex init_mu_; diff --git a/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h b/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h new file mode 100644 index 000000000000..3c1c864c0615 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h @@ -0,0 +1,40 @@ +//===-- sanitizer_symbolizer_fuchsia.h -----------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries. +// +// Define Fuchsia's string formats and limits for the markup symbolizer. 
+//===----------------------------------------------------------------------===// +#ifndef SANITIZER_SYMBOLIZER_FUCHSIA_H +#define SANITIZER_SYMBOLIZER_FUCHSIA_H + +#include "sanitizer_internal_defs.h" + +namespace __sanitizer { + +// See the spec at: +// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md + +// This is used by UBSan for type names, and by ASan for global variable names. +constexpr const char *kFormatDemangle = "{{{symbol:%s}}}"; +constexpr uptr kFormatDemangleMax = 1024; // Arbitrary. + +// Function name or equivalent from PC location. +constexpr const char *kFormatFunction = "{{{pc:%p}}}"; +constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex. + +// Global variable name or equivalent from data memory address. +constexpr const char *kFormatData = "{{{data:%p}}}"; + +// One frame in a backtrace (printed on a line by itself). +constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}"; + +} // namespace __sanitizer + +#endif // SANITIZER_SYMBOLIZER_FUCHSIA_H diff --git a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc index a4bab668b27a..f2ee7baad837 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc @@ -26,8 +26,8 @@ Symbolizer *Symbolizer::GetOrInit() { return symbolizer_; } -// See sanitizer_symbolizer_fuchsia.cc. -#if !SANITIZER_FUCHSIA +// See sanitizer_symbolizer_markup.cc. 
+#if !SANITIZER_SYMBOLIZER_MARKUP const char *ExtractToken(const char *str, const char *delims, char **result) { uptr prefix_len = internal_strcspn(str, delims); @@ -145,11 +145,6 @@ const char *Symbolizer::Demangle(const char *name) { return PlatformDemangle(name); } -void Symbolizer::PrepareForSandboxing() { - BlockingMutexLock l(&mu_); - PlatformPrepareForSandboxing(); -} - bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address, const char **module_name, uptr *module_offset, @@ -494,6 +489,6 @@ bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) { return true; } -#endif // !SANITIZER_FUCHSIA +#endif // !SANITIZER_SYMBOLIZER_MARKUP } // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.cc b/lib/sanitizer_common/sanitizer_symbolizer_markup.cc index 3d1117d9d3ca..c62dc90fabba 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_markup.cc @@ -1,4 +1,4 @@ -//===-- sanitizer_symbolizer_fuchsia.cc -----------------------------------===// +//===-- sanitizer_symbolizer_markup.cc ------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -9,18 +9,27 @@ // // This file is shared between various sanitizers' runtime libraries. // -// Implementation of Fuchsia-specific symbolizer. +// Implementation of offline markup symbolizer. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" -#if SANITIZER_FUCHSIA +#if SANITIZER_SYMBOLIZER_MARKUP -#include "sanitizer_fuchsia.h" +#if SANITIZER_FUCHSIA +#include "sanitizer_symbolizer_fuchsia.h" +#elif SANITIZER_RTEMS +#include "sanitizer_symbolizer_rtems.h" +#endif +#include "sanitizer_stacktrace.h" #include "sanitizer_symbolizer.h" +#include <limits.h> +#include <unwind.h> + namespace __sanitizer { -// For Fuchsia we don't do any actual symbolization per se. 
+// This generic support for offline symbolizing is based on the +// Fuchsia port. We don't do any actual symbolization per se. // Instead, we emit text containing raw addresses and raw linkage // symbol names, embedded in Fuchsia's symbolization markup format. // Fuchsia's logging infrastructure emits enough information about @@ -29,20 +38,6 @@ namespace __sanitizer { // https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md // This is used by UBSan for type names, and by ASan for global variable names. -constexpr const char *kFormatDemangle = "{{{symbol:%s}}}"; -constexpr uptr kFormatDemangleMax = 1024; // Arbitrary. - -// Function name or equivalent from PC location. -constexpr const char *kFormatFunction = "{{{pc:%p}}}"; -constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex. - -// Global variable name or equivalent from data memory address. -constexpr const char *kFormatData = "{{{data:%p}}}"; - -// One frame in a backtrace (printed on a line by itself). -constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}"; - -// This is used by UBSan for type names, and by ASan for global variable names. // It's expected to return a static buffer that will be reused on each call. 
const char *Symbolizer::Demangle(const char *name) { static char buffer[kFormatDemangleMax]; @@ -102,6 +97,49 @@ Symbolizer *Symbolizer::PlatformInit() { void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); } +void StartReportDeadlySignal() {} +void ReportDeadlySignal(const SignalContext &sig, u32 tid, + UnwindSignalStackCallbackType unwind, + const void *unwind_context) {} + +#if SANITIZER_CAN_SLOW_UNWIND +struct UnwindTraceArg { + BufferedStackTrace *stack; + u32 max_depth; +}; + +_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { + UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param); + CHECK_LT(arg->stack->size, arg->max_depth); + uptr pc = _Unwind_GetIP(ctx); + if (pc < PAGE_SIZE) return _URC_NORMAL_STOP; + arg->stack->trace_buffer[arg->stack->size++] = pc; + return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP + : _URC_NO_REASON); +} + +void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) { + CHECK_GE(max_depth, 2); + size = 0; + UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)}; + _Unwind_Backtrace(Unwind_Trace, &arg); + CHECK_GT(size, 0); + // We need to pop a few frames so that pc is on top. + uptr to_pop = LocatePcInTrace(pc); + // trace_buffer[0] belongs to the current function so we always pop it, + // unless there is only 1 frame in the stack trace (1 frame is always better + // than 0!). 
+ PopStackFrames(Min(to_pop, static_cast<uptr>(1))); + trace_buffer[0] = pc; +} + +void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, + u32 max_depth) { + CHECK_NE(context, nullptr); + UNREACHABLE("signal context doesn't exist"); +} +#endif // SANITIZER_CAN_SLOW_UNWIND + } // namespace __sanitizer -#endif // SANITIZER_FUCHSIA +#endif // SANITIZER_SYMBOLIZER_MARKUP diff --git a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc index 71d748d0515f..d74a6f44ee6b 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc @@ -315,8 +315,9 @@ class Addr2LinePool : public SymbolizerTool { public: explicit Addr2LinePool(const char *addr2line_path, LowLevelAllocator *allocator) - : addr2line_path_(addr2line_path), allocator_(allocator), - addr2line_pool_(16) {} + : addr2line_path_(addr2line_path), allocator_(allocator) { + addr2line_pool_.reserve(16); + } bool SymbolizePC(uptr addr, SymbolizedStack *stack) override { if (const char *buf = @@ -445,8 +446,6 @@ const char *Symbolizer::PlatformDemangle(const char *name) { return DemangleSwiftAndCXX(name); } -void Symbolizer::PlatformPrepareForSandboxing() {} - static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) { const char *path = common_flags()->external_symbolizer_path; const char *binary_name = path ? StripModuleName(path) : ""; diff --git a/lib/sanitizer_common/sanitizer_symbolizer_report.cc b/lib/sanitizer_common/sanitizer_symbolizer_report.cc new file mode 100644 index 000000000000..fd26d4cfa079 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_symbolizer_report.cc @@ -0,0 +1,282 @@ +//===-- sanitizer_symbolizer_report.cc ------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +/// +/// This file is shared between AddressSanitizer and other sanitizer run-time +/// libraries and implements symbolized reports related functions. +/// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common.h" +#include "sanitizer_file.h" +#include "sanitizer_flags.h" +#include "sanitizer_procmaps.h" +#include "sanitizer_report_decorator.h" +#include "sanitizer_stacktrace.h" +#include "sanitizer_stacktrace_printer.h" +#include "sanitizer_symbolizer.h" + +#if SANITIZER_POSIX +# include "sanitizer_posix.h" +# include <sys/mman.h> +#endif + +namespace __sanitizer { + +#if !SANITIZER_GO +void ReportErrorSummary(const char *error_type, const AddressInfo &info, + const char *alt_tool_name) { + if (!common_flags()->print_summary) return; + InternalScopedString buff(kMaxSummaryLength); + buff.append("%s ", error_type); + RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style, + common_flags()->strip_path_prefix); + ReportErrorSummary(buff.data(), alt_tool_name); +} +#endif + +#if !SANITIZER_FUCHSIA + +bool ReportFile::SupportsColors() { + SpinMutexLock l(mu); + ReopenIfNecessary(); + return SupportsColoredOutput(fd); +} + +static INLINE bool ReportSupportsColors() { + return report_file.SupportsColors(); +} + +#else // SANITIZER_FUCHSIA + +// Fuchsia's logs always go through post-processing that handles colorization. +static INLINE bool ReportSupportsColors() { return true; } + +#endif // !SANITIZER_FUCHSIA + +bool ColorizeReports() { + // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color + // printing on Windows. 
+ if (SANITIZER_WINDOWS) + return false; + + const char *flag = common_flags()->color; + return internal_strcmp(flag, "always") == 0 || + (internal_strcmp(flag, "auto") == 0 && ReportSupportsColors()); +} + +void ReportErrorSummary(const char *error_type, const StackTrace *stack, + const char *alt_tool_name) { +#if !SANITIZER_GO + if (!common_flags()->print_summary) + return; + if (stack->size == 0) { + ReportErrorSummary(error_type); + return; + } + // Currently, we include the first stack frame into the report summary. + // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc). + uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]); + SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc); + ReportErrorSummary(error_type, frame->info, alt_tool_name); + frame->ClearAll(); +#endif +} + +void ReportMmapWriteExec(int prot) { +#if SANITIZER_POSIX && (!SANITIZER_GO && !SANITIZER_ANDROID) + if ((prot & (PROT_WRITE | PROT_EXEC)) != (PROT_WRITE | PROT_EXEC)) + return; + + ScopedErrorReportLock l; + SanitizerCommonDecorator d; + + InternalMmapVector<BufferedStackTrace> stack_buffer(1); + BufferedStackTrace *stack = stack_buffer.data(); + stack->Reset(); + uptr top = 0; + uptr bottom = 0; + GET_CALLER_PC_BP_SP; + (void)sp; + bool fast = common_flags()->fast_unwind_on_fatal; + if (fast) + GetThreadStackTopAndBottom(false, &top, &bottom); + stack->Unwind(kStackTraceMax, pc, bp, nullptr, top, bottom, fast); + + Printf("%s", d.Warning()); + Report("WARNING: %s: writable-executable page usage\n", SanitizerToolName); + Printf("%s", d.Default()); + + stack->Print(); + ReportErrorSummary("w-and-x-usage", stack); +#endif +} + +#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO +void StartReportDeadlySignal() { + // Write the first message using fd=2, just in case. + // It may actually fail to write in case stderr is closed. 
+ CatastrophicErrorWrite(SanitizerToolName, internal_strlen(SanitizerToolName)); + static const char kDeadlySignal[] = ":DEADLYSIGNAL\n"; + CatastrophicErrorWrite(kDeadlySignal, sizeof(kDeadlySignal) - 1); +} + +static void MaybeReportNonExecRegion(uptr pc) { +#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD + MemoryMappingLayout proc_maps(/*cache_enabled*/ true); + MemoryMappedSegment segment; + while (proc_maps.Next(&segment)) { + if (pc >= segment.start && pc < segment.end && !segment.IsExecutable()) + Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n"); + } +#endif +} + +static void PrintMemoryByte(InternalScopedString *str, const char *before, + u8 byte) { + SanitizerCommonDecorator d; + str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15, + d.Default()); +} + +static void MaybeDumpInstructionBytes(uptr pc) { + if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) + return; + InternalScopedString str(1024); + str.append("First 16 instruction bytes at pc: "); + if (IsAccessibleMemoryRange(pc, 16)) { + for (int i = 0; i < 16; ++i) { + PrintMemoryByte(&str, "", ((u8 *)pc)[i]); + } + str.append("\n"); + } else { + str.append("unaccessible\n"); + } + Report("%s", str.data()); +} + +static void MaybeDumpRegisters(void *context) { + if (!common_flags()->dump_registers) return; + SignalContext::DumpAllRegisters(context); +} + +static void ReportStackOverflowImpl(const SignalContext &sig, u32 tid, + UnwindSignalStackCallbackType unwind, + const void *unwind_context) { + SanitizerCommonDecorator d; + Printf("%s", d.Warning()); + static const char kDescription[] = "stack-overflow"; + Report("ERROR: %s: %s on address %p (pc %p bp %p sp %p T%d)\n", + SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc, + (void *)sig.bp, (void *)sig.sp, tid); + Printf("%s", d.Default()); + InternalMmapVector<BufferedStackTrace> stack_buffer(1); + BufferedStackTrace *stack = stack_buffer.data(); + 
stack->Reset(); + unwind(sig, unwind_context, stack); + stack->Print(); + ReportErrorSummary(kDescription, stack); +} + +static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid, + UnwindSignalStackCallbackType unwind, + const void *unwind_context) { + SanitizerCommonDecorator d; + Printf("%s", d.Warning()); + const char *description = sig.Describe(); + Report("ERROR: %s: %s on unknown address %p (pc %p bp %p sp %p T%d)\n", + SanitizerToolName, description, (void *)sig.addr, (void *)sig.pc, + (void *)sig.bp, (void *)sig.sp, tid); + Printf("%s", d.Default()); + if (sig.pc < GetPageSizeCached()) + Report("Hint: pc points to the zero page.\n"); + if (sig.is_memory_access) { + const char *access_type = + sig.write_flag == SignalContext::WRITE + ? "WRITE" + : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN"); + Report("The signal is caused by a %s memory access.\n", access_type); + if (sig.addr < GetPageSizeCached()) + Report("Hint: address points to the zero page.\n"); + } + MaybeReportNonExecRegion(sig.pc); + InternalMmapVector<BufferedStackTrace> stack_buffer(1); + BufferedStackTrace *stack = stack_buffer.data(); + stack->Reset(); + unwind(sig, unwind_context, stack); + stack->Print(); + MaybeDumpInstructionBytes(sig.pc); + MaybeDumpRegisters(sig.context); + Printf("%s can not provide additional info.\n", SanitizerToolName); + ReportErrorSummary(description, stack); +} + +void ReportDeadlySignal(const SignalContext &sig, u32 tid, + UnwindSignalStackCallbackType unwind, + const void *unwind_context) { + if (sig.IsStackOverflow()) + ReportStackOverflowImpl(sig, tid, unwind, unwind_context); + else + ReportDeadlySignalImpl(sig, tid, unwind, unwind_context); +} + +void HandleDeadlySignal(void *siginfo, void *context, u32 tid, + UnwindSignalStackCallbackType unwind, + const void *unwind_context) { + StartReportDeadlySignal(); + ScopedErrorReportLock rl; + SignalContext sig(siginfo, context); + ReportDeadlySignal(sig, tid, unwind, unwind_context); + 
 Report("ABORTING\n"); + Die(); +} + +#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO + +static atomic_uintptr_t reporting_thread = {0}; +static StaticSpinMutex CommonSanitizerReportMutex; + +ScopedErrorReportLock::ScopedErrorReportLock() { + uptr current = GetThreadSelf(); + for (;;) { + uptr expected = 0; + if (atomic_compare_exchange_strong(&reporting_thread, &expected, current, + memory_order_relaxed)) { + // We've claimed reporting_thread so proceed. + CommonSanitizerReportMutex.Lock(); + return; + } + + if (expected == current) { + // This is either asynch signal or nested error during error reporting. + // Fail simple to avoid deadlocks in Report(). + + // Can't use Report() here because of potential deadlocks in nested + // signal handlers. + CatastrophicErrorWrite(SanitizerToolName, + internal_strlen(SanitizerToolName)); + static const char msg[] = ": nested bug in the same thread, aborting.\n"; + CatastrophicErrorWrite(msg, sizeof(msg) - 1); + + internal__exit(common_flags()->exitcode); + } + + internal_sched_yield(); + } +} + +ScopedErrorReportLock::~ScopedErrorReportLock() { + CommonSanitizerReportMutex.Unlock(); + atomic_store_relaxed(&reporting_thread, 0); +} + +void ScopedErrorReportLock::CheckLocked() { + CommonSanitizerReportMutex.CheckLocked(); +} + +} // namespace __sanitizer diff --git a/lib/sanitizer_common/sanitizer_symbolizer_rtems.h b/lib/sanitizer_common/sanitizer_symbolizer_rtems.h new file mode 100644 index 000000000000..62356ef6ef73 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_symbolizer_rtems.h @@ -0,0 +1,41 @@ +//===-- sanitizer_symbolizer_rtems.h -----------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries. 
+// +// Define RTEMS's string formats and limits for the markup symbolizer. +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_SYMBOLIZER_RTEMS_H +#define SANITIZER_SYMBOLIZER_RTEMS_H + +#include "sanitizer_internal_defs.h" + +namespace __sanitizer { + +// The Myriad RTEMS symbolizer currently only parses backtrace lines, +// so use a format that the symbolizer understands. For other +// markups, keep them the same as the Fuchsia's. + +// This is used by UBSan for type names, and by ASan for global variable names. +constexpr const char *kFormatDemangle = "{{{symbol:%s}}}"; +constexpr uptr kFormatDemangleMax = 1024; // Arbitrary. + +// Function name or equivalent from PC location. +constexpr const char *kFormatFunction = "{{{pc:%p}}}"; +constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex. + +// Global variable name or equivalent from data memory address. +constexpr const char *kFormatData = "{{{data:%p}}}"; + +// One frame in a backtrace (printed on a line by itself). +constexpr const char *kFormatFrame = " [%u] IP: %p"; + +} // namespace __sanitizer + +#endif // SANITIZER_SYMBOLIZER_RTEMS_H diff --git a/lib/sanitizer_common/sanitizer_symbolizer_win.cc b/lib/sanitizer_common/sanitizer_symbolizer_win.cc index 135823b157de..6ec75831a633 100644 --- a/lib/sanitizer_common/sanitizer_symbolizer_win.cc +++ b/lib/sanitizer_common/sanitizer_symbolizer_win.cc @@ -176,10 +176,6 @@ const char *Symbolizer::PlatformDemangle(const char *name) { return name; } -void Symbolizer::PlatformPrepareForSandboxing() { - // Do nothing. 
-} - namespace { struct ScopedHandle { ScopedHandle() : h_(nullptr) {} diff --git a/lib/sanitizer_common/sanitizer_syscall_generic.inc b/lib/sanitizer_common/sanitizer_syscall_generic.inc index 3c8e77867ce5..e4ed1b4deee0 100644 --- a/lib/sanitizer_common/sanitizer_syscall_generic.inc +++ b/lib/sanitizer_common/sanitizer_syscall_generic.inc @@ -11,7 +11,8 @@ // //===----------------------------------------------------------------------===// -#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || SANITIZER_SOLARIS +#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \ + SANITIZER_OPENBSD || SANITIZER_SOLARIS # define SYSCALL(name) SYS_ ## name #else # define SYSCALL(name) __NR_ ## name diff --git a/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc b/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc new file mode 100644 index 000000000000..4f766100813d --- /dev/null +++ b/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc @@ -0,0 +1,3786 @@ +//===-- sanitizer_syscalls_netbsd.inc ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Common syscalls handlers for tools like AddressSanitizer, +// ThreadSanitizer, MemorySanitizer, etc. +// +// This file should be included into the tool's interceptor file, +// which has to define it's own macros: +// COMMON_SYSCALL_PRE_READ_RANGE +// Called in prehook for regions that will be read by the kernel and +// must be initialized. +// COMMON_SYSCALL_PRE_WRITE_RANGE +// Called in prehook for regions that will be written to by the kernel +// and must be addressable. The actual write range may be smaller than +// reported in the prehook. See POST_WRITE_RANGE. +// COMMON_SYSCALL_POST_READ_RANGE +// Called in posthook for regions that were read by the kernel. 
Does +// not make much sense. +// COMMON_SYSCALL_POST_WRITE_RANGE +// Called in posthook for regions that were written to by the kernel +// and are now initialized. +// COMMON_SYSCALL_ACQUIRE(addr) +// Acquire memory visibility from addr. +// COMMON_SYSCALL_RELEASE(addr) +// Release memory visibility to addr. +// COMMON_SYSCALL_FD_CLOSE(fd) +// Called before closing file descriptor fd. +// COMMON_SYSCALL_FD_ACQUIRE(fd) +// Acquire memory visibility from fd. +// COMMON_SYSCALL_FD_RELEASE(fd) +// Release memory visibility to fd. +// COMMON_SYSCALL_PRE_FORK() +// Called before fork syscall. +// COMMON_SYSCALL_POST_FORK(long long res) +// Called after fork syscall. +// +// DO NOT EDIT! THIS FILE HAS BEEN GENERATED! +// +// Generated with: generate_netbsd_syscalls.awk +// Generated date: 2018-03-03 +// Generated from: syscalls.master,v 1.291 2018/01/06 16:41:23 kamil Exp +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_NETBSD + +#include "sanitizer_libc.h" + +#define PRE_SYSCALL(name) \ + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name +#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s) +#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) + +#define POST_SYSCALL(name) \ + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name +#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s) +#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s) + +#ifndef COMMON_SYSCALL_ACQUIRE +#define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr)) +#endif + +#ifndef COMMON_SYSCALL_RELEASE +#define COMMON_SYSCALL_RELEASE(addr) ((void)(addr)) +#endif + +#ifndef COMMON_SYSCALL_FD_CLOSE +#define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd)) +#endif + +#ifndef COMMON_SYSCALL_FD_ACQUIRE +#define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd)) +#endif + +#ifndef COMMON_SYSCALL_FD_RELEASE +#define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd)) +#endif + +#ifndef 
COMMON_SYSCALL_PRE_FORK +#define COMMON_SYSCALL_PRE_FORK() \ + {} +#endif + +#ifndef COMMON_SYSCALL_POST_FORK +#define COMMON_SYSCALL_POST_FORK(res) \ + {} +#endif + +// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such). + +extern "C" { +#define SYS_MAXSYSARGS 8 +PRE_SYSCALL(syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) { + /* Nothing to do */ +} +POST_SYSCALL(syscall) +(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) { + /* Nothing to do */ +} +PRE_SYSCALL(exit)(long long rval_) { /* Nothing to do */ } +POST_SYSCALL(exit)(long long res, long long rval_) { /* Nothing to do */ } +PRE_SYSCALL(fork)(void) { COMMON_SYSCALL_PRE_FORK(); } +POST_SYSCALL(fork)(long long res) { COMMON_SYSCALL_POST_FORK(res); } +PRE_SYSCALL(read)(long long fd_, void *buf_, long long nbyte_) { + if (buf_) { + PRE_WRITE(buf_, nbyte_); + } +} +POST_SYSCALL(read)(long long res, long long fd_, void *buf_, long long nbyte_) { + if (res > 0) { + POST_WRITE(buf_, res); + } +} +PRE_SYSCALL(write)(long long fd_, void *buf_, long long nbyte_) { + if (buf_) { + PRE_READ(buf_, nbyte_); + } +} +POST_SYSCALL(write) +(long long res, long long fd_, void *buf_, long long nbyte_) { + if (res > 0) { + POST_READ(buf_, res); + } +} +PRE_SYSCALL(open)(void *path_, long long flags_, long long mode_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(open) +(long long res, void *path_, long long flags_, long long mode_) { + if (res > 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(close)(long long fd_) { COMMON_SYSCALL_FD_CLOSE((int)fd_); } +POST_SYSCALL(close)(long long res, long long fd_) { /* Nothing to do */ } +PRE_SYSCALL(compat_50_wait4) +(long long pid_, void *status_, long long options_, void *rusage_) { + /* TODO */ +} +POST_SYSCALL(compat_50_wait4) +(long long res, long 
long pid_, void *status_, long long options_, + void *rusage_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ocreat)(void *path_, long long mode_) { /* TODO */ } +POST_SYSCALL(compat_43_ocreat)(long long res, void *path_, long long mode_) { + /* TODO */ +} +PRE_SYSCALL(link)(void *path_, void *link_) { + const char *path = (const char *)path_; + const char *link = (const char *)link_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (link) { + PRE_READ(path, __sanitizer::internal_strlen(link) + 1); + } +} +POST_SYSCALL(link)(long long res, void *path_, void *link_) { + if (res == 0) { + const char *path = (const char *)path_; + const char *link = (const char *)link_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (link) { + POST_READ(path, __sanitizer::internal_strlen(link) + 1); + } + } +} +PRE_SYSCALL(unlink)(void *path_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(unlink)(long long res, void *path_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +/* syscall 11 has been skipped */ +PRE_SYSCALL(chdir)(void *path_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(chdir)(long long res, void *path_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(fchdir)(long long fd_) { /* Nothing to do */ } +POST_SYSCALL(fchdir)(long long res, long long fd_) { /* Nothing to do */ } +PRE_SYSCALL(compat_50_mknod)(void *path_, long long mode_, long long dev_) { + /* TODO */ +} +POST_SYSCALL(compat_50_mknod) +(long long res, void *path_, long long mode_, long long dev_) { + /* TODO */ +} +PRE_SYSCALL(chmod)(void *path_, long long mode_) { + 
const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(chmod)(long long res, void *path_, long long mode_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(chown)(void *path_, long long uid_, long long gid_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(chown) +(long long res, void *path_, long long uid_, long long gid_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(break)(void *nsize_) { /* Nothing to do */ } +POST_SYSCALL(break)(long long res, void *nsize_) { /* Nothing to do */ } +PRE_SYSCALL(compat_20_getfsstat) +(void *buf_, long long bufsize_, long long flags_) { + /* TODO */ +} +POST_SYSCALL(compat_20_getfsstat) +(long long res, void *buf_, long long bufsize_, long long flags_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_olseek) +(long long fd_, long long offset_, long long whence_) { + /* TODO */ +} +POST_SYSCALL(compat_43_olseek) +(long long res, long long fd_, long long offset_, long long whence_) { + /* TODO */ +} +PRE_SYSCALL(getpid)(void) { /* Nothing to do */ } +POST_SYSCALL(getpid)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(compat_40_mount) +(void *type_, void *path_, long long flags_, void *data_) { + /* TODO */ +} +POST_SYSCALL(compat_40_mount) +(long long res, void *type_, void *path_, long long flags_, void *data_) { + /* TODO */ +} +PRE_SYSCALL(unmount)(void *path_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(unmount)(long long res, void *path_, long long flags_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + 
POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(setuid)(long long uid_) { /* Nothing to do */ } +POST_SYSCALL(setuid)(long long res, long long uid_) { /* Nothing to do */ } +PRE_SYSCALL(getuid)(void) { /* Nothing to do */ } +POST_SYSCALL(getuid)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(geteuid)(void) { /* Nothing to do */ } +POST_SYSCALL(geteuid)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(ptrace) +(long long req_, long long pid_, void *addr_, long long data_) { + if (req_ == ptrace_pt_io) { + struct __sanitizer_ptrace_io_desc *addr = + (struct __sanitizer_ptrace_io_desc *)addr_; + PRE_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz); + if (addr->piod_op == ptrace_piod_write_d || + addr->piod_op == ptrace_piod_write_i) { + PRE_READ(addr->piod_addr, addr->piod_len); + } + if (addr->piod_op == ptrace_piod_read_d || + addr->piod_op == ptrace_piod_read_i || + addr->piod_op == ptrace_piod_read_auxv) { + PRE_WRITE(addr->piod_addr, addr->piod_len); + } + } else if (req_ == ptrace_pt_lwpinfo) { + struct __sanitizer_ptrace_lwpinfo *addr = + (struct __sanitizer_ptrace_lwpinfo *)addr_; + PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t)); + PRE_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz); + } else if (req_ == ptrace_pt_set_event_mask) { + PRE_READ(addr_, struct_ptrace_ptrace_event_struct_sz); + } else if (req_ == ptrace_pt_get_event_mask) { + PRE_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz); + } else if (req_ == ptrace_pt_set_siginfo) { + PRE_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz); + } else if (req_ == ptrace_pt_get_siginfo) { + PRE_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz); + } else if (req_ == ptrace_pt_setregs) { + PRE_READ(addr_, struct_ptrace_reg_struct_sz); + } else if (req_ == ptrace_pt_getregs) { + PRE_WRITE(addr_, struct_ptrace_reg_struct_sz); + } else if (req_ == ptrace_pt_setfpregs) { + PRE_READ(addr_, struct_ptrace_fpreg_struct_sz); + } else if (req_ == 
ptrace_pt_getfpregs) { + PRE_WRITE(addr_, struct_ptrace_fpreg_struct_sz); + } else if (req_ == ptrace_pt_setdbregs) { + PRE_READ(addr_, struct_ptrace_dbreg_struct_sz); + } else if (req_ == ptrace_pt_getdbregs) { + PRE_WRITE(addr_, struct_ptrace_dbreg_struct_sz); + } +} +POST_SYSCALL(ptrace) +(long long res, long long req_, long long pid_, void *addr_, long long data_) { + if (res == 0) { + if (req_ == ptrace_pt_io) { + struct __sanitizer_ptrace_io_desc *addr = + (struct __sanitizer_ptrace_io_desc *)addr_; + POST_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz); + if (addr->piod_op == ptrace_piod_write_d || + addr->piod_op == ptrace_piod_write_i) { + POST_READ(addr->piod_addr, addr->piod_len); + } + if (addr->piod_op == ptrace_piod_read_d || + addr->piod_op == ptrace_piod_read_i || + addr->piod_op == ptrace_piod_read_auxv) { + POST_WRITE(addr->piod_addr, addr->piod_len); + } + } else if (req_ == ptrace_pt_lwpinfo) { + struct __sanitizer_ptrace_lwpinfo *addr = + (struct __sanitizer_ptrace_lwpinfo *)addr_; + POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t)); + POST_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz); + } else if (req_ == ptrace_pt_set_event_mask) { + POST_READ(addr_, struct_ptrace_ptrace_event_struct_sz); + } else if (req_ == ptrace_pt_get_event_mask) { + POST_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz); + } else if (req_ == ptrace_pt_set_siginfo) { + POST_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz); + } else if (req_ == ptrace_pt_get_siginfo) { + POST_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz); + } else if (req_ == ptrace_pt_setregs) { + POST_READ(addr_, struct_ptrace_reg_struct_sz); + } else if (req_ == ptrace_pt_getregs) { + POST_WRITE(addr_, struct_ptrace_reg_struct_sz); + } else if (req_ == ptrace_pt_setfpregs) { + POST_READ(addr_, struct_ptrace_fpreg_struct_sz); + } else if (req_ == ptrace_pt_getfpregs) { + POST_WRITE(addr_, struct_ptrace_fpreg_struct_sz); + } else if (req_ == ptrace_pt_setdbregs) { + 
POST_READ(addr_, struct_ptrace_dbreg_struct_sz); + } else if (req_ == ptrace_pt_getdbregs) { + POST_WRITE(addr_, struct_ptrace_dbreg_struct_sz); + } + } +} +PRE_SYSCALL(recvmsg)(long long s_, void *msg_, long long flags_) { + PRE_WRITE(msg_, sizeof(__sanitizer_msghdr)); +} +POST_SYSCALL(recvmsg) +(long long res, long long s_, void *msg_, long long flags_) { + if (res > 0) { + POST_WRITE(msg_, sizeof(__sanitizer_msghdr)); + } +} +PRE_SYSCALL(sendmsg)(long long s_, void *msg_, long long flags_) { + PRE_READ(msg_, sizeof(__sanitizer_msghdr)); +} +POST_SYSCALL(sendmsg) +(long long res, long long s_, void *msg_, long long flags_) { + if (res > 0) { + POST_READ(msg_, sizeof(__sanitizer_msghdr)); + } +} +PRE_SYSCALL(recvfrom) +(long long s_, void *buf_, long long len_, long long flags_, void *from_, + void *fromlenaddr_) { + PRE_WRITE(buf_, len_); + PRE_WRITE(from_, struct_sockaddr_sz); + PRE_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t)); +} +POST_SYSCALL(recvfrom) +(long long res, long long s_, void *buf_, long long len_, long long flags_, + void *from_, void *fromlenaddr_) { + if (res >= 0) { + POST_WRITE(buf_, res); + POST_WRITE(from_, struct_sockaddr_sz); + POST_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t)); + } +} +PRE_SYSCALL(accept)(long long s_, void *name_, void *anamelen_) { + PRE_WRITE(name_, struct_sockaddr_sz); + PRE_WRITE(anamelen_, sizeof(__sanitizer_socklen_t)); +} +POST_SYSCALL(accept) +(long long res, long long s_, void *name_, void *anamelen_) { + if (res == 0) { + POST_WRITE(name_, struct_sockaddr_sz); + POST_WRITE(anamelen_, sizeof(__sanitizer_socklen_t)); + } +} +PRE_SYSCALL(getpeername)(long long fdes_, void *asa_, void *alen_) { + PRE_WRITE(asa_, struct_sockaddr_sz); + PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t)); +} +POST_SYSCALL(getpeername) +(long long res, long long fdes_, void *asa_, void *alen_) { + if (res == 0) { + POST_WRITE(asa_, struct_sockaddr_sz); + POST_WRITE(alen_, sizeof(__sanitizer_socklen_t)); + } +} 
+PRE_SYSCALL(getsockname)(long long fdes_, void *asa_, void *alen_) { + PRE_WRITE(asa_, struct_sockaddr_sz); + PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t)); +} +POST_SYSCALL(getsockname) +(long long res, long long fdes_, void *asa_, void *alen_) { + if (res == 0) { + POST_WRITE(asa_, struct_sockaddr_sz); + POST_WRITE(alen_, sizeof(__sanitizer_socklen_t)); + } +} +PRE_SYSCALL(access)(void *path_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(access)(long long res, void *path_, long long flags_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(chflags)(void *path_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(chflags)(long long res, void *path_, long long flags_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(fchflags)(long long fd_, long long flags_) { /* Nothing to do */ } +POST_SYSCALL(fchflags)(long long res, long long fd_, long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(sync)(void) { /* Nothing to do */ } +POST_SYSCALL(sync)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(kill)(long long pid_, long long signum_) { /* Nothing to do */ } +POST_SYSCALL(kill)(long long res, long long pid_, long long signum_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_43_stat43)(void *path_, void *ub_) { /* TODO */ } +POST_SYSCALL(compat_43_stat43)(long long res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(getppid)(void) { /* Nothing to do */ } +POST_SYSCALL(getppid)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(compat_43_lstat43)(void *path_, void *ub_) { /* TODO */ } +POST_SYSCALL(compat_43_lstat43)(long long 
res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(dup)(long long fd_) { /* Nothing to do */ } +POST_SYSCALL(dup)(long long res, long long fd_) { /* Nothing to do */ } +PRE_SYSCALL(pipe)(void) { + /* pipe returns two descriptors through two returned values */ +} +POST_SYSCALL(pipe)(long long res) { + /* pipe returns two descriptors through two returned values */ +} +PRE_SYSCALL(getegid)(void) { /* Nothing to do */ } +POST_SYSCALL(getegid)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(profil) +(void *samples_, long long size_, long long offset_, long long scale_) { + if (samples_) { + PRE_WRITE(samples_, size_); + } +} +POST_SYSCALL(profil) +(long long res, void *samples_, long long size_, long long offset_, + long long scale_) { + if (res == 0) { + if (samples_) { + POST_WRITE(samples_, size_); + } + } +} +PRE_SYSCALL(ktrace) +(void *fname_, long long ops_, long long facs_, long long pid_) { + const char *fname = (const char *)fname_; + if (fname) { + PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1); + } +} +POST_SYSCALL(ktrace) +(long long res, void *fname_, long long ops_, long long facs_, long long pid_) { + const char *fname = (const char *)fname_; + if (res == 0) { + if (fname) { + POST_READ(fname, __sanitizer::internal_strlen(fname) + 1); + } + } +} +PRE_SYSCALL(compat_13_sigaction13)(long long signum_, void *nsa_, void *osa_) { + /* TODO */ +} +POST_SYSCALL(compat_13_sigaction13) +(long long res, long long signum_, void *nsa_, void *osa_) { + /* TODO */ +} +PRE_SYSCALL(getgid)(void) { /* Nothing to do */ } +POST_SYSCALL(getgid)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(compat_13_sigprocmask13)(long long how_, long long mask_) { + /* TODO */ +} +POST_SYSCALL(compat_13_sigprocmask13) +(long long res, long long how_, long long mask_) { + /* TODO */ +} +PRE_SYSCALL(__getlogin)(void *namebuf_, long long namelen_) { + if (namebuf_) { + PRE_WRITE(namebuf_, namelen_); + } +} +POST_SYSCALL(__getlogin)(long long res, void *namebuf_, 
long long namelen_) { + if (res == 0) { + if (namebuf_) { + POST_WRITE(namebuf_, namelen_); + } + } +} +PRE_SYSCALL(__setlogin)(void *namebuf_) { + const char *namebuf = (const char *)namebuf_; + if (namebuf) { + PRE_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1); + } +} +POST_SYSCALL(__setlogin)(long long res, void *namebuf_) { + if (res == 0) { + const char *namebuf = (const char *)namebuf_; + if (namebuf) { + POST_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1); + } + } +} +PRE_SYSCALL(acct)(void *path_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(acct)(long long res, void *path_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(compat_13_sigpending13)(void) { /* TODO */ } +POST_SYSCALL(compat_13_sigpending13)(long long res) { /* TODO */ } +PRE_SYSCALL(compat_13_sigaltstack13)(void *nss_, void *oss_) { /* TODO */ } +POST_SYSCALL(compat_13_sigaltstack13)(long long res, void *nss_, void *oss_) { + /* TODO */ +} +PRE_SYSCALL(ioctl)(long long fd_, long long com_, void *data_) { + /* Nothing to do */ +} +POST_SYSCALL(ioctl)(long long res, long long fd_, long long com_, void *data_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_12_oreboot)(long long opt_) { /* TODO */ } +POST_SYSCALL(compat_12_oreboot)(long long res, long long opt_) { /* TODO */ } +PRE_SYSCALL(revoke)(void *path_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(revoke)(long long res, void *path_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(symlink)(void *path_, void *link_) { + const char *path = (const char *)path_; + const char *link = (const char *)link_; + if (path) { + 
PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (link) { + PRE_READ(link, __sanitizer::internal_strlen(link) + 1); + } +} +POST_SYSCALL(symlink)(long long res, void *path_, void *link_) { + if (res == 0) { + const char *path = (const char *)path_; + const char *link = (const char *)link_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (link) { + POST_READ(link, __sanitizer::internal_strlen(link) + 1); + } + } +} +PRE_SYSCALL(readlink)(void *path_, void *buf_, long long count_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (buf_) { + PRE_WRITE(buf_, count_); + } +} +POST_SYSCALL(readlink) +(long long res, void *path_, void *buf_, long long count_) { + if (res > 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (buf_) { + PRE_WRITE(buf_, res); + } + } +} +PRE_SYSCALL(execve)(void *path_, void *argp_, void *envp_) { + const char *path = (const char *)path_; + char **argp = (char **)argp_; + char **envp = (char **)envp_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (argp && argp[0]) { + char *a = argp[0]; + while (a++) { + PRE_READ(a, __sanitizer::internal_strlen(a) + 1); + } + } + if (envp && envp[0]) { + char *e = envp[0]; + while (e++) { + PRE_READ(e, __sanitizer::internal_strlen(e) + 1); + } + } +} +POST_SYSCALL(execve)(long long res, void *path_, void *argp_, void *envp_) { + /* If we are here, something went wrong */ + const char *path = (const char *)path_; + char **argp = (char **)argp_; + char **envp = (char **)envp_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (argp && argp[0]) { + char *a = argp[0]; + while (a++) { + POST_READ(a, __sanitizer::internal_strlen(a) + 1); + } + } + if (envp && envp[0]) { + char *e = envp[0]; + while (e++) { + POST_READ(e, 
__sanitizer::internal_strlen(e) + 1); + } + } +} +PRE_SYSCALL(umask)(long long newmask_) { /* Nothing to do */ } +POST_SYSCALL(umask)(long long res, long long newmask_) { /* Nothing to do */ } +PRE_SYSCALL(chroot)(void *path_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(chroot)(long long res, void *path_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(compat_43_fstat43)(long long fd_, void *sb_) { /* TODO */ } +POST_SYSCALL(compat_43_fstat43)(long long res, long long fd_, void *sb_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ogetkerninfo) +(long long op_, void *where_, void *size_, long long arg_) { + /* TODO */ +} +POST_SYSCALL(compat_43_ogetkerninfo) +(long long res, long long op_, void *where_, void *size_, long long arg_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ogetpagesize)(void) { /* TODO */ } +POST_SYSCALL(compat_43_ogetpagesize)(long long res) { /* TODO */ } +PRE_SYSCALL(compat_12_msync)(void *addr_, long long len_) { /* TODO */ } +POST_SYSCALL(compat_12_msync)(long long res, void *addr_, long long len_) { + /* TODO */ +} +PRE_SYSCALL(vfork)(void) { /* Nothing to do */ } +POST_SYSCALL(vfork)(long long res) { /* Nothing to do */ } +/* syscall 67 has been skipped */ +/* syscall 68 has been skipped */ +/* syscall 69 has been skipped */ +/* syscall 70 has been skipped */ +PRE_SYSCALL(compat_43_ommap) +(void *addr_, long long len_, long long prot_, long long flags_, long long fd_, + long long pos_) { + /* TODO */ +} +POST_SYSCALL(compat_43_ommap) +(long long res, void *addr_, long long len_, long long prot_, long long flags_, + long long fd_, long long pos_) { + /* TODO */ +} +PRE_SYSCALL(vadvise)(long long anom_) { /* Nothing to do */ } +POST_SYSCALL(vadvise)(long long res, long long anom_) { /* Nothing to do */ } +PRE_SYSCALL(munmap)(void *addr_, long long 
len_) { /* Nothing to do */ } +POST_SYSCALL(munmap)(long long res, void *addr_, long long len_) { + /* Nothing to do */ +} +PRE_SYSCALL(mprotect)(void *addr_, long long len_, long long prot_) { + /* Nothing to do */ +} +POST_SYSCALL(mprotect) +(long long res, void *addr_, long long len_, long long prot_) { + /* Nothing to do */ +} +PRE_SYSCALL(madvise)(void *addr_, long long len_, long long behav_) { + /* Nothing to do */ +} +POST_SYSCALL(madvise) +(long long res, void *addr_, long long len_, long long behav_) { + /* Nothing to do */ +} +/* syscall 76 has been skipped */ +/* syscall 77 has been skipped */ +PRE_SYSCALL(mincore)(void *addr_, long long len_, void *vec_) { + /* Nothing to do */ +} +POST_SYSCALL(mincore)(long long res, void *addr_, long long len_, void *vec_) { + /* Nothing to do */ +} +PRE_SYSCALL(getgroups)(long long gidsetsize_, void *gidset_) { + unsigned int *gidset = (unsigned int *)gidset_; + if (gidset) { + PRE_WRITE(gidset, sizeof(*gidset) * gidsetsize_); + } +} +POST_SYSCALL(getgroups)(long long res, long long gidsetsize_, void *gidset_) { + if (res == 0) { + unsigned int *gidset = (unsigned int *)gidset_; + if (gidset) { + POST_WRITE(gidset, sizeof(*gidset) * gidsetsize_); + } + } +} +PRE_SYSCALL(setgroups)(long long gidsetsize_, void *gidset_) { + unsigned int *gidset = (unsigned int *)gidset_; + if (gidset) { + PRE_READ(gidset, sizeof(*gidset) * gidsetsize_); + } +} +POST_SYSCALL(setgroups)(long long res, long long gidsetsize_, void *gidset_) { + if (res == 0) { + unsigned int *gidset = (unsigned int *)gidset_; + if (gidset) { + POST_READ(gidset, sizeof(*gidset) * gidsetsize_); + } + } +} +PRE_SYSCALL(getpgrp)(void) { /* Nothing to do */ } +POST_SYSCALL(getpgrp)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(setpgid)(long long pid_, long long pgid_) { /* Nothing to do */ } +POST_SYSCALL(setpgid)(long long res, long long pid_, long long pgid_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_50_setitimer)(long long which_, void *itv_, 
void *oitv_) { + /* TODO */ +} +POST_SYSCALL(compat_50_setitimer) +(long long res, long long which_, void *itv_, void *oitv_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_owait)(void) { /* TODO */ } +POST_SYSCALL(compat_43_owait)(long long res) { /* TODO */ } +PRE_SYSCALL(compat_12_oswapon)(void *name_) { /* TODO */ } +POST_SYSCALL(compat_12_oswapon)(long long res, void *name_) { /* TODO */ } +PRE_SYSCALL(compat_50_getitimer)(long long which_, void *itv_) { /* TODO */ } +POST_SYSCALL(compat_50_getitimer)(long long res, long long which_, void *itv_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ogethostname)(void *hostname_, long long len_) { + /* TODO */ +} +POST_SYSCALL(compat_43_ogethostname) +(long long res, void *hostname_, long long len_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_osethostname)(void *hostname_, long long len_) { + /* TODO */ +} +POST_SYSCALL(compat_43_osethostname) +(long long res, void *hostname_, long long len_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ogetdtablesize)(void) { /* TODO */ } +POST_SYSCALL(compat_43_ogetdtablesize)(long long res) { /* TODO */ } +PRE_SYSCALL(dup2)(long long from_, long long to_) { /* Nothing to do */ } +POST_SYSCALL(dup2)(long long res, long long from_, long long to_) { + /* Nothing to do */ +} +/* syscall 91 has been skipped */ +PRE_SYSCALL(fcntl)(long long fd_, long long cmd_, void *arg_) { + /* Nothing to do */ +} +POST_SYSCALL(fcntl)(long long res, long long fd_, long long cmd_, void *arg_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_50_select) +(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) { + /* TODO */ +} +POST_SYSCALL(compat_50_select) +(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) { + /* TODO */ +} +/* syscall 94 has been skipped */ +PRE_SYSCALL(fsync)(long long fd_) { /* Nothing to do */ } +POST_SYSCALL(fsync)(long long res, long long fd_) { /* Nothing to do */ } +PRE_SYSCALL(setpriority)(long long which_, long long who_, long long prio_) { + /* Nothing to do */ +} 
+POST_SYSCALL(setpriority) +(long long res, long long which_, long long who_, long long prio_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_30_socket) +(long long domain_, long long type_, long long protocol_) { + /* TODO */ +} +POST_SYSCALL(compat_30_socket) +(long long res, long long domain_, long long type_, long long protocol_) { + /* TODO */ +} +PRE_SYSCALL(connect)(long long s_, void *name_, long long namelen_) { + PRE_READ(name_, namelen_); +} +POST_SYSCALL(connect) +(long long res, long long s_, void *name_, long long namelen_) { + if (res == 0) { + POST_READ(name_, namelen_); + } +} +PRE_SYSCALL(compat_43_oaccept)(long long s_, void *name_, void *anamelen_) { + /* TODO */ +} +POST_SYSCALL(compat_43_oaccept) +(long long res, long long s_, void *name_, void *anamelen_) { + /* TODO */ +} +PRE_SYSCALL(getpriority)(long long which_, long long who_) { + /* Nothing to do */ +} +POST_SYSCALL(getpriority)(long long res, long long which_, long long who_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_43_osend) +(long long s_, void *buf_, long long len_, long long flags_) { + /* TODO */ +} +POST_SYSCALL(compat_43_osend) +(long long res, long long s_, void *buf_, long long len_, long long flags_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_orecv) +(long long s_, void *buf_, long long len_, long long flags_) { + /* TODO */ +} +POST_SYSCALL(compat_43_orecv) +(long long res, long long s_, void *buf_, long long len_, long long flags_) { + /* TODO */ +} +PRE_SYSCALL(compat_13_sigreturn13)(void *sigcntxp_) { /* TODO */ } +POST_SYSCALL(compat_13_sigreturn13)(long long res, void *sigcntxp_) { + /* TODO */ +} +PRE_SYSCALL(bind)(long long s_, void *name_, long long namelen_) { + PRE_READ(name_, namelen_); +} +POST_SYSCALL(bind) +(long long res, long long s_, void *name_, long long namelen_) { + if (res == 0) { + PRE_READ(name_, namelen_); + } +} +PRE_SYSCALL(setsockopt) +(long long s_, long long level_, long long name_, void *val_, + long long valsize_) { + if (val_) { + 
PRE_READ(val_, valsize_); + } +} +POST_SYSCALL(setsockopt) +(long long res, long long s_, long long level_, long long name_, void *val_, + long long valsize_) { + if (res == 0) { + if (val_) { + POST_READ(val_, valsize_); + } + } +} +PRE_SYSCALL(listen)(long long s_, long long backlog_) { /* Nothing to do */ } +POST_SYSCALL(listen)(long long res, long long s_, long long backlog_) { + /* Nothing to do */ +} +/* syscall 107 has been skipped */ +PRE_SYSCALL(compat_43_osigvec)(long long signum_, void *nsv_, void *osv_) { + /* TODO */ +} +POST_SYSCALL(compat_43_osigvec) +(long long res, long long signum_, void *nsv_, void *osv_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_osigblock)(long long mask_) { /* TODO */ } +POST_SYSCALL(compat_43_osigblock)(long long res, long long mask_) { /* TODO */ } +PRE_SYSCALL(compat_43_osigsetmask)(long long mask_) { /* TODO */ } +POST_SYSCALL(compat_43_osigsetmask)(long long res, long long mask_) { + /* TODO */ +} +PRE_SYSCALL(compat_13_sigsuspend13)(long long mask_) { /* TODO */ } +POST_SYSCALL(compat_13_sigsuspend13)(long long res, long long mask_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_osigstack)(void *nss_, void *oss_) { /* TODO */ } +POST_SYSCALL(compat_43_osigstack)(long long res, void *nss_, void *oss_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_orecvmsg)(long long s_, void *msg_, long long flags_) { + /* TODO */ +} +POST_SYSCALL(compat_43_orecvmsg) +(long long res, long long s_, void *msg_, long long flags_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_osendmsg)(long long s_, void *msg_, long long flags_) { + /* TODO */ +} +POST_SYSCALL(compat_43_osendmsg) +(long long res, long long s_, void *msg_, long long flags_) { + /* TODO */ +} +/* syscall 115 has been skipped */ +PRE_SYSCALL(compat_50_gettimeofday)(void *tp_, void *tzp_) { /* TODO */ } +POST_SYSCALL(compat_50_gettimeofday)(long long res, void *tp_, void *tzp_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_getrusage)(long long who_, void *rusage_) { /* TODO */ } 
+POST_SYSCALL(compat_50_getrusage) +(long long res, long long who_, void *rusage_) { + /* TODO */ +} +PRE_SYSCALL(getsockopt) +(long long s_, long long level_, long long name_, void *val_, void *avalsize_) { + /* TODO */ +} +POST_SYSCALL(getsockopt) +(long long res, long long s_, long long level_, long long name_, void *val_, + void *avalsize_) { + /* TODO */ +} +/* syscall 119 has been skipped */ +PRE_SYSCALL(readv)(long long fd_, void *iovp_, long long iovcnt_) { + struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_; + int i; + if (iovp) { + PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_); + for (i = 0; i < iovcnt_; i++) { + PRE_WRITE(iovp[i].iov_base, iovp[i].iov_len); + } + } +} +POST_SYSCALL(readv) +(long long res, long long fd_, void *iovp_, long long iovcnt_) { + struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_; + int i; + uptr m, n = res; + if (res > 0) { + if (iovp) { + POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_); + for (i = 0; i < iovcnt_ && n > 0; i++) { + m = n > iovp[i].iov_len ? iovp[i].iov_len : n; + POST_WRITE(iovp[i].iov_base, m); + n -= m; + } + } + } +} +PRE_SYSCALL(writev)(long long fd_, void *iovp_, long long iovcnt_) { + struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_; + int i; + if (iovp) { + PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_); + for (i = 0; i < iovcnt_; i++) { + PRE_READ(iovp[i].iov_base, iovp[i].iov_len); + } + } +} +POST_SYSCALL(writev) +(long long res, long long fd_, void *iovp_, long long iovcnt_) { + struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_; + int i; + uptr m, n = res; + if (res > 0) { + if (iovp) { + POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_); + for (i = 0; i < iovcnt_ && n > 0; i++) { + m = n > iovp[i].iov_len ? 
iovp[i].iov_len : n; + POST_READ(iovp[i].iov_base, m); + n -= m; + } + } + } +} +PRE_SYSCALL(compat_50_settimeofday)(void *tv_, void *tzp_) { /* TODO */ } +POST_SYSCALL(compat_50_settimeofday)(long long res, void *tv_, void *tzp_) { + /* TODO */ +} +PRE_SYSCALL(fchown)(long long fd_, long long uid_, long long gid_) { + /* Nothing to do */ +} +POST_SYSCALL(fchown) +(long long res, long long fd_, long long uid_, long long gid_) { + /* Nothing to do */ +} +PRE_SYSCALL(fchmod)(long long fd_, long long mode_) { /* Nothing to do */ } +POST_SYSCALL(fchmod)(long long res, long long fd_, long long mode_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_43_orecvfrom) +(long long s_, void *buf_, long long len_, long long flags_, void *from_, + void *fromlenaddr_) { + /* TODO */ +} +POST_SYSCALL(compat_43_orecvfrom) +(long long res, long long s_, void *buf_, long long len_, long long flags_, + void *from_, void *fromlenaddr_) { + /* TODO */ +} +PRE_SYSCALL(setreuid)(long long ruid_, long long euid_) { /* Nothing to do */ } +POST_SYSCALL(setreuid)(long long res, long long ruid_, long long euid_) { + /* Nothing to do */ +} +PRE_SYSCALL(setregid)(long long rgid_, long long egid_) { /* Nothing to do */ } +POST_SYSCALL(setregid)(long long res, long long rgid_, long long egid_) { + /* Nothing to do */ +} +PRE_SYSCALL(rename)(void *from_, void *to_) { + const char *from = (const char *)from_; + const char *to = (const char *)to_; + if (from) { + PRE_READ(from, __sanitizer::internal_strlen(from) + 1); + } + if (to) { + PRE_READ(to, __sanitizer::internal_strlen(to) + 1); + } +} +POST_SYSCALL(rename)(long long res, void *from_, void *to_) { + if (res == 0) { + const char *from = (const char *)from_; + const char *to = (const char *)to_; + if (from) { + POST_READ(from, __sanitizer::internal_strlen(from) + 1); + } + if (to) { + POST_READ(to, __sanitizer::internal_strlen(to) + 1); + } + } +} +PRE_SYSCALL(compat_43_otruncate)(void *path_, long long length_) { /* TODO */ } 
+POST_SYSCALL(compat_43_otruncate) +(long long res, void *path_, long long length_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_oftruncate)(long long fd_, long long length_) { + /* TODO */ +} +POST_SYSCALL(compat_43_oftruncate) +(long long res, long long fd_, long long length_) { + /* TODO */ +} +PRE_SYSCALL(flock)(long long fd_, long long how_) { /* Nothing to do */ } +POST_SYSCALL(flock)(long long res, long long fd_, long long how_) { + /* Nothing to do */ +} +PRE_SYSCALL(mkfifo)(void *path_, long long mode_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(mkfifo)(long long res, void *path_, long long mode_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(sendto) +(long long s_, void *buf_, long long len_, long long flags_, void *to_, + long long tolen_) { + PRE_READ(buf_, len_); + PRE_READ(to_, tolen_); +} +POST_SYSCALL(sendto) +(long long res, long long s_, void *buf_, long long len_, long long flags_, + void *to_, long long tolen_) { + if (res >= 0) { + POST_READ(buf_, len_); + POST_READ(to_, tolen_); + } +} +PRE_SYSCALL(shutdown)(long long s_, long long how_) { /* Nothing to do */ } +POST_SYSCALL(shutdown)(long long res, long long s_, long long how_) { + /* Nothing to do */ +} +PRE_SYSCALL(socketpair) +(long long domain_, long long type_, long long protocol_, void *rsv_) { + PRE_WRITE(rsv_, 2 * sizeof(int)); +} +POST_SYSCALL(socketpair) +(long long res, long long domain_, long long type_, long long protocol_, + void *rsv_) { + if (res == 0) { + POST_WRITE(rsv_, 2 * sizeof(int)); + } +} +PRE_SYSCALL(mkdir)(void *path_, long long mode_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(mkdir)(long long res, void *path_, long long mode_) { + if (res == 0) { + const char *path = 
(const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(rmdir)(void *path_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(rmdir)(long long res, void *path_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(compat_50_utimes)(void *path_, void *tptr_) { /* TODO */ } +POST_SYSCALL(compat_50_utimes)(long long res, void *path_, void *tptr_) { + /* TODO */ +} +/* syscall 139 has been skipped */ +PRE_SYSCALL(compat_50_adjtime)(void *delta_, void *olddelta_) { /* TODO */ } +POST_SYSCALL(compat_50_adjtime)(long long res, void *delta_, void *olddelta_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ogetpeername)(long long fdes_, void *asa_, void *alen_) { + /* TODO */ +} +POST_SYSCALL(compat_43_ogetpeername) +(long long res, long long fdes_, void *asa_, void *alen_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ogethostid)(void) { /* TODO */ } +POST_SYSCALL(compat_43_ogethostid)(long long res) { /* TODO */ } +PRE_SYSCALL(compat_43_osethostid)(long long hostid_) { /* TODO */ } +POST_SYSCALL(compat_43_osethostid)(long long res, long long hostid_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_ogetrlimit)(long long which_, void *rlp_) { /* TODO */ } +POST_SYSCALL(compat_43_ogetrlimit) +(long long res, long long which_, void *rlp_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_osetrlimit)(long long which_, void *rlp_) { /* TODO */ } +POST_SYSCALL(compat_43_osetrlimit) +(long long res, long long which_, void *rlp_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_okillpg)(long long pgid_, long long signum_) { + /* TODO */ +} +POST_SYSCALL(compat_43_okillpg) +(long long res, long long pgid_, long long signum_) { + /* TODO */ +} +PRE_SYSCALL(setsid)(void) { /* Nothing to do */ } +POST_SYSCALL(setsid)(long long res) { /* Nothing to do */ } 
+PRE_SYSCALL(compat_50_quotactl) +(void *path_, long long cmd_, long long uid_, void *arg_) { + /* TODO */ +} +POST_SYSCALL(compat_50_quotactl) +(long long res, void *path_, long long cmd_, long long uid_, void *arg_) { + /* TODO */ +} +PRE_SYSCALL(compat_43_oquota)(void) { /* TODO */ } +POST_SYSCALL(compat_43_oquota)(long long res) { /* TODO */ } +PRE_SYSCALL(compat_43_ogetsockname)(long long fdec_, void *asa_, void *alen_) { + /* TODO */ +} +POST_SYSCALL(compat_43_ogetsockname) +(long long res, long long fdec_, void *asa_, void *alen_) { + /* TODO */ +} +/* syscall 151 has been skipped */ +/* syscall 152 has been skipped */ +/* syscall 153 has been skipped */ +/* syscall 154 has been skipped */ +PRE_SYSCALL(nfssvc)(long long flag_, void *argp_) { /* Nothing to do */ } +POST_SYSCALL(nfssvc)(long long res, long long flag_, void *argp_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_43_ogetdirentries) +(long long fd_, void *buf_, long long count_, void *basep_) { + /* TODO */ +} +POST_SYSCALL(compat_43_ogetdirentries) +(long long res, long long fd_, void *buf_, long long count_, void *basep_) { + /* TODO */ +} +PRE_SYSCALL(compat_20_statfs)(void *path_, void *buf_) { /* TODO */ } +POST_SYSCALL(compat_20_statfs)(long long res, void *path_, void *buf_) { + /* TODO */ +} +PRE_SYSCALL(compat_20_fstatfs)(long long fd_, void *buf_) { /* TODO */ } +POST_SYSCALL(compat_20_fstatfs)(long long res, long long fd_, void *buf_) { + /* TODO */ +} +/* syscall 159 has been skipped */ +/* syscall 160 has been skipped */ +PRE_SYSCALL(compat_30_getfh)(void *fname_, void *fhp_) { /* TODO */ } +POST_SYSCALL(compat_30_getfh)(long long res, void *fname_, void *fhp_) { + /* TODO */ +} +PRE_SYSCALL(compat_09_ogetdomainname)(void *domainname_, long long len_) { + /* TODO */ +} +POST_SYSCALL(compat_09_ogetdomainname) +(long long res, void *domainname_, long long len_) { + /* TODO */ +} +PRE_SYSCALL(compat_09_osetdomainname)(void *domainname_, long long len_) { + /* TODO */ +} 
+POST_SYSCALL(compat_09_osetdomainname) +(long long res, void *domainname_, long long len_) { + /* TODO */ +} +PRE_SYSCALL(compat_09_ouname)(void *name_) { /* TODO */ } +POST_SYSCALL(compat_09_ouname)(long long res, void *name_) { /* TODO */ } +PRE_SYSCALL(sysarch)(long long op_, void *parms_) { /* TODO */ } +POST_SYSCALL(sysarch)(long long res, long long op_, void *parms_) { /* TODO */ } +/* syscall 166 has been skipped */ +/* syscall 167 has been skipped */ +/* syscall 168 has been skipped */ +#if !defined(_LP64) +PRE_SYSCALL(compat_10_osemsys) +(long long which_, long long a2_, long long a3_, long long a4_, long long a5_) { + /* TODO */ +} +POST_SYSCALL(compat_10_osemsys) +(long long res, long long which_, long long a2_, long long a3_, long long a4_, + long long a5_) { + /* TODO */ +} +#else +/* syscall 169 has been skipped */ +#endif +#if !defined(_LP64) +PRE_SYSCALL(compat_10_omsgsys) +(long long which_, long long a2_, long long a3_, long long a4_, long long a5_, + long long a6_) { + /* TODO */ +} +POST_SYSCALL(compat_10_omsgsys) +(long long res, long long which_, long long a2_, long long a3_, long long a4_, + long long a5_, long long a6_) { + /* TODO */ +} +#else +/* syscall 170 has been skipped */ +#endif +#if !defined(_LP64) +PRE_SYSCALL(compat_10_oshmsys) +(long long which_, long long a2_, long long a3_, long long a4_) { + /* TODO */ +} +POST_SYSCALL(compat_10_oshmsys) +(long long res, long long which_, long long a2_, long long a3_, long long a4_) { + /* TODO */ +} +#else +/* syscall 171 has been skipped */ +#endif +/* syscall 172 has been skipped */ +PRE_SYSCALL(pread) +(long long fd_, void *buf_, long long nbyte_, long long PAD_, + long long offset_) { + if (buf_) { + PRE_WRITE(buf_, nbyte_); + } +} +POST_SYSCALL(pread) +(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_, + long long offset_) { + if (res > 0) { + POST_WRITE(buf_, res); + } +} +PRE_SYSCALL(pwrite) +(long long fd_, void *buf_, long long nbyte_, long long PAD_, + 
long long offset_) { + if (buf_) { + PRE_READ(buf_, nbyte_); + } +} +POST_SYSCALL(pwrite) +(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_, + long long offset_) { + if (res > 0) { + POST_READ(buf_, res); + } +} +PRE_SYSCALL(compat_30_ntp_gettime)(void *ntvp_) { /* TODO */ } +POST_SYSCALL(compat_30_ntp_gettime)(long long res, void *ntvp_) { /* TODO */ } +#if defined(NTP) || !defined(_KERNEL_OPT) +PRE_SYSCALL(ntp_adjtime)(void *tp_) { /* Nothing to do */ } +POST_SYSCALL(ntp_adjtime)(long long res, void *tp_) { /* Nothing to do */ } +#else +/* syscall 176 has been skipped */ +#endif +/* syscall 177 has been skipped */ +/* syscall 178 has been skipped */ +/* syscall 179 has been skipped */ +/* syscall 180 has been skipped */ +PRE_SYSCALL(setgid)(long long gid_) { /* Nothing to do */ } +POST_SYSCALL(setgid)(long long res, long long gid_) { /* Nothing to do */ } +PRE_SYSCALL(setegid)(long long egid_) { /* Nothing to do */ } +POST_SYSCALL(setegid)(long long res, long long egid_) { /* Nothing to do */ } +PRE_SYSCALL(seteuid)(long long euid_) { /* Nothing to do */ } +POST_SYSCALL(seteuid)(long long res, long long euid_) { /* Nothing to do */ } +PRE_SYSCALL(lfs_bmapv)(void *fsidp_, void *blkiov_, long long blkcnt_) { + /* TODO */ +} +POST_SYSCALL(lfs_bmapv) +(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) { + /* TODO */ +} +PRE_SYSCALL(lfs_markv)(void *fsidp_, void *blkiov_, long long blkcnt_) { + /* TODO */ +} +POST_SYSCALL(lfs_markv) +(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) { + /* TODO */ +} +PRE_SYSCALL(lfs_segclean)(void *fsidp_, long long segment_) { /* TODO */ } +POST_SYSCALL(lfs_segclean)(long long res, void *fsidp_, long long segment_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_lfs_segwait)(void *fsidp_, void *tv_) { /* TODO */ } +POST_SYSCALL(compat_50_lfs_segwait)(long long res, void *fsidp_, void *tv_) { + /* TODO */ +} +PRE_SYSCALL(compat_12_stat12)(void *path_, void *ub_) { /* TODO */ } 
+POST_SYSCALL(compat_12_stat12)(long long res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(compat_12_fstat12)(long long fd_, void *sb_) { /* TODO */ } +POST_SYSCALL(compat_12_fstat12)(long long res, long long fd_, void *sb_) { + /* TODO */ +} +PRE_SYSCALL(compat_12_lstat12)(void *path_, void *ub_) { /* TODO */ } +POST_SYSCALL(compat_12_lstat12)(long long res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(pathconf)(void *path_, long long name_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(pathconf)(long long res, void *path_, long long name_) { + if (res != -1) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(fpathconf)(long long fd_, long long name_) { /* Nothing to do */ } +POST_SYSCALL(fpathconf)(long long res, long long fd_, long long name_) { + /* Nothing to do */ +} +/* syscall 193 has been skipped */ +PRE_SYSCALL(getrlimit)(long long which_, void *rlp_) { + PRE_WRITE(rlp_, struct_rlimit_sz); +} +POST_SYSCALL(getrlimit)(long long res, long long which_, void *rlp_) { + if (res == 0) { + POST_WRITE(rlp_, struct_rlimit_sz); + } +} +PRE_SYSCALL(setrlimit)(long long which_, void *rlp_) { + PRE_READ(rlp_, struct_rlimit_sz); +} +POST_SYSCALL(setrlimit)(long long res, long long which_, void *rlp_) { + if (res == 0) { + POST_READ(rlp_, struct_rlimit_sz); + } +} +PRE_SYSCALL(compat_12_getdirentries) +(long long fd_, void *buf_, long long count_, void *basep_) { + /* TODO */ +} +POST_SYSCALL(compat_12_getdirentries) +(long long res, long long fd_, void *buf_, long long count_, void *basep_) { + /* TODO */ +} +PRE_SYSCALL(mmap) +(void *addr_, long long len_, long long prot_, long long flags_, long long fd_, + long long PAD_, long long pos_) { + /* Nothing to do */ +} +POST_SYSCALL(mmap) +(long long res, void *addr_, long long len_, long long prot_, long long flags_, + 
long long fd_, long long PAD_, long long pos_) { + /* Nothing to do */ +} +PRE_SYSCALL(__syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) { + /* Nothing to do */ +} +POST_SYSCALL(__syscall) +(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) { + /* Nothing to do */ +} +PRE_SYSCALL(lseek) +(long long fd_, long long PAD_, long long offset_, long long whence_) { + /* Nothing to do */ +} +POST_SYSCALL(lseek) +(long long res, long long fd_, long long PAD_, long long offset_, + long long whence_) { + /* Nothing to do */ +} +PRE_SYSCALL(truncate)(void *path_, long long PAD_, long long length_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(truncate) +(long long res, void *path_, long long PAD_, long long length_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(ftruncate)(long long fd_, long long PAD_, long long length_) { + /* Nothing to do */ +} +POST_SYSCALL(ftruncate) +(long long res, long long fd_, long long PAD_, long long length_) { + /* Nothing to do */ +} +PRE_SYSCALL(__sysctl) +(void *name_, long long namelen_, void *oldv_, void *oldlenp_, void *newv_, + long long newlen_) { + const int *name = (const int *)name_; + if (name) { + PRE_READ(name, namelen_ * sizeof(*name)); + } + if (newv_) { + PRE_READ(name, newlen_); + } +} +POST_SYSCALL(__sysctl) +(long long res, void *name_, long long namelen_, void *oldv_, void *oldlenp_, + void *newv_, long long newlen_) { + if (res == 0) { + const int *name = (const int *)name_; + if (name) { + POST_READ(name, namelen_ * sizeof(*name)); + } + if (newv_) { + POST_READ(name, newlen_); + } + } +} +PRE_SYSCALL(mlock)(void *addr_, long long len_) { /* Nothing to do */ } +POST_SYSCALL(mlock)(long long res, void *addr_, long long len_) { + /* Nothing to do */ +} +PRE_SYSCALL(munlock)(void *addr_, long long 
len_) { /* Nothing to do */ } +POST_SYSCALL(munlock)(long long res, void *addr_, long long len_) { + /* Nothing to do */ +} +PRE_SYSCALL(undelete)(void *path_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(undelete)(long long res, void *path_) { + if (res == 0) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(compat_50_futimes)(long long fd_, void *tptr_) { /* TODO */ } +POST_SYSCALL(compat_50_futimes)(long long res, long long fd_, void *tptr_) { + /* TODO */ +} +PRE_SYSCALL(getpgid)(long long pid_) { /* Nothing to do */ } +POST_SYSCALL(getpgid)(long long res, long long pid_) { /* Nothing to do */ } +PRE_SYSCALL(reboot)(long long opt_, void *bootstr_) { + const char *bootstr = (const char *)bootstr_; + if (bootstr) { + PRE_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1); + } +} +POST_SYSCALL(reboot)(long long res, long long opt_, void *bootstr_) { + /* This call should never return */ + const char *bootstr = (const char *)bootstr_; + if (bootstr) { + POST_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1); + } +} +PRE_SYSCALL(poll)(void *fds_, long long nfds_, long long timeout_) { + /* Nothing to do */ +} +POST_SYSCALL(poll) +(long long res, void *fds_, long long nfds_, long long timeout_) { + /* Nothing to do */ +} +PRE_SYSCALL(afssys) +(long long id_, long long a1_, long long a2_, long long a3_, long long a4_, + long long a5_, long long a6_) { + /* TODO */ +} +POST_SYSCALL(afssys) +(long long res, long long id_, long long a1_, long long a2_, long long a3_, + long long a4_, long long a5_, long long a6_) { + /* TODO */ +} +/* syscall 211 has been skipped */ +/* syscall 212 has been skipped */ +/* syscall 213 has been skipped */ +/* syscall 214 has been skipped */ +/* syscall 215 has been skipped */ +/* syscall 216 has been skipped */ +/* syscall 217 has been skipped */ 
+/* syscall 218 has been skipped */ +/* syscall 219 has been skipped */ +PRE_SYSCALL(compat_14___semctl) +(long long semid_, long long semnum_, long long cmd_, void *arg_) { + /* TODO */ +} +POST_SYSCALL(compat_14___semctl) +(long long res, long long semid_, long long semnum_, long long cmd_, + void *arg_) { + /* TODO */ +} +PRE_SYSCALL(semget)(long long key_, long long nsems_, long long semflg_) { + /* Nothing to do */ +} +POST_SYSCALL(semget) +(long long res, long long key_, long long nsems_, long long semflg_) { + /* Nothing to do */ +} +PRE_SYSCALL(semop)(long long semid_, void *sops_, long long nsops_) { + if (sops_) { + PRE_READ(sops_, nsops_ * struct_sembuf_sz); + } +} +POST_SYSCALL(semop) +(long long res, long long semid_, void *sops_, long long nsops_) { + if (res == 0) { + if (sops_) { + POST_READ(sops_, nsops_ * struct_sembuf_sz); + } + } +} +PRE_SYSCALL(semconfig)(long long flag_) { /* Nothing to do */ } +POST_SYSCALL(semconfig)(long long res, long long flag_) { /* Nothing to do */ } +PRE_SYSCALL(compat_14_msgctl)(long long msqid_, long long cmd_, void *buf_) { + /* TODO */ +} +POST_SYSCALL(compat_14_msgctl) +(long long res, long long msqid_, long long cmd_, void *buf_) { + /* TODO */ +} +PRE_SYSCALL(msgget)(long long key_, long long msgflg_) { /* Nothing to do */ } +POST_SYSCALL(msgget)(long long res, long long key_, long long msgflg_) { + /* Nothing to do */ +} +PRE_SYSCALL(msgsnd) +(long long msqid_, void *msgp_, long long msgsz_, long long msgflg_) { + if (msgp_) { + PRE_READ(msgp_, msgsz_); + } +} +POST_SYSCALL(msgsnd) +(long long res, long long msqid_, void *msgp_, long long msgsz_, + long long msgflg_) { + if (res == 0) { + if (msgp_) { + POST_READ(msgp_, msgsz_); + } + } +} +PRE_SYSCALL(msgrcv) +(long long msqid_, void *msgp_, long long msgsz_, long long msgtyp_, + long long msgflg_) { + /* Nothing to do */ +} +POST_SYSCALL(msgrcv) +(long long res, long long msqid_, void *msgp_, long long msgsz_, + long long msgtyp_, long long msgflg_) { + /* 
Nothing to do */ +} +PRE_SYSCALL(shmat)(long long shmid_, void *shmaddr_, long long shmflg_) { + /* Nothing to do */ +} +POST_SYSCALL(shmat) +(long long res, long long shmid_, void *shmaddr_, long long shmflg_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_14_shmctl)(long long shmid_, long long cmd_, void *buf_) { + /* TODO */ +} +POST_SYSCALL(compat_14_shmctl) +(long long res, long long shmid_, long long cmd_, void *buf_) { + /* TODO */ +} +PRE_SYSCALL(shmdt)(void *shmaddr_) { /* Nothing to do */ } +POST_SYSCALL(shmdt)(long long res, void *shmaddr_) { /* Nothing to do */ } +PRE_SYSCALL(shmget)(long long key_, long long size_, long long shmflg_) { + /* Nothing to do */ +} +POST_SYSCALL(shmget) +(long long res, long long key_, long long size_, long long shmflg_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_50_clock_gettime)(long long clock_id_, void *tp_) { + /* TODO */ +} +POST_SYSCALL(compat_50_clock_gettime) +(long long res, long long clock_id_, void *tp_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_clock_settime)(long long clock_id_, void *tp_) { + /* TODO */ +} +POST_SYSCALL(compat_50_clock_settime) +(long long res, long long clock_id_, void *tp_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_clock_getres)(long long clock_id_, void *tp_) { + /* TODO */ +} +POST_SYSCALL(compat_50_clock_getres) +(long long res, long long clock_id_, void *tp_) { + /* TODO */ +} +PRE_SYSCALL(timer_create)(long long clock_id_, void *evp_, void *timerid_) { + /* Nothing to do */ +} +POST_SYSCALL(timer_create) +(long long res, long long clock_id_, void *evp_, void *timerid_) { + /* Nothing to do */ +} +PRE_SYSCALL(timer_delete)(long long timerid_) { /* Nothing to do */ } +POST_SYSCALL(timer_delete)(long long res, long long timerid_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_50_timer_settime) +(long long timerid_, long long flags_, void *value_, void *ovalue_) { + /* TODO */ +} +POST_SYSCALL(compat_50_timer_settime) +(long long res, long long timerid_, long long flags_, void *value_, + 
void *ovalue_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_timer_gettime)(long long timerid_, void *value_) { + /* TODO */ +} +POST_SYSCALL(compat_50_timer_gettime) +(long long res, long long timerid_, void *value_) { + /* TODO */ +} +PRE_SYSCALL(timer_getoverrun)(long long timerid_) { /* Nothing to do */ } +POST_SYSCALL(timer_getoverrun)(long long res, long long timerid_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_50_nanosleep)(void *rqtp_, void *rmtp_) { /* TODO */ } +POST_SYSCALL(compat_50_nanosleep)(long long res, void *rqtp_, void *rmtp_) { + /* TODO */ +} +PRE_SYSCALL(fdatasync)(long long fd_) { /* Nothing to do */ } +POST_SYSCALL(fdatasync)(long long res, long long fd_) { /* Nothing to do */ } +PRE_SYSCALL(mlockall)(long long flags_) { /* Nothing to do */ } +POST_SYSCALL(mlockall)(long long res, long long flags_) { /* Nothing to do */ } +PRE_SYSCALL(munlockall)(void) { /* Nothing to do */ } +POST_SYSCALL(munlockall)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(compat_50___sigtimedwait)(void *set_, void *info_, void *timeout_) { + /* TODO */ +} +POST_SYSCALL(compat_50___sigtimedwait) +(long long res, void *set_, void *info_, void *timeout_) { + /* TODO */ +} +PRE_SYSCALL(sigqueueinfo)(long long pid_, void *info_) { + if (info_) { + PRE_READ(info_, siginfo_t_sz); + } +} +POST_SYSCALL(sigqueueinfo)(long long res, long long pid_, void *info_) {} +PRE_SYSCALL(modctl)(long long cmd_, void *arg_) { /* TODO */ } +POST_SYSCALL(modctl)(long long res, long long cmd_, void *arg_) { /* TODO */ } +PRE_SYSCALL(_ksem_init)(long long value_, void *idp_) { /* Nothing to do */ } +POST_SYSCALL(_ksem_init)(long long res, long long value_, void *idp_) { + /* Nothing to do */ +} +PRE_SYSCALL(_ksem_open) +(void *name_, long long oflag_, long long mode_, long long value_, void *idp_) { + const char *name = (const char *)name_; + if (name) { + PRE_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +POST_SYSCALL(_ksem_open) +(long long res, void *name_, long long 
oflag_, long long mode_, + long long value_, void *idp_) { + const char *name = (const char *)name_; + if (name) { + POST_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +PRE_SYSCALL(_ksem_unlink)(void *name_) { + const char *name = (const char *)name_; + if (name) { + PRE_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +POST_SYSCALL(_ksem_unlink)(long long res, void *name_) { + const char *name = (const char *)name_; + if (name) { + POST_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +PRE_SYSCALL(_ksem_close)(long long id_) { /* Nothing to do */ } +POST_SYSCALL(_ksem_close)(long long res, long long id_) { /* Nothing to do */ } +PRE_SYSCALL(_ksem_post)(long long id_) { /* Nothing to do */ } +POST_SYSCALL(_ksem_post)(long long res, long long id_) { /* Nothing to do */ } +PRE_SYSCALL(_ksem_wait)(long long id_) { /* Nothing to do */ } +POST_SYSCALL(_ksem_wait)(long long res, long long id_) { /* Nothing to do */ } +PRE_SYSCALL(_ksem_trywait)(long long id_) { /* Nothing to do */ } +POST_SYSCALL(_ksem_trywait)(long long res, long long id_) { + /* Nothing to do */ +} +PRE_SYSCALL(_ksem_getvalue)(long long id_, void *value_) { /* Nothing to do */ } +POST_SYSCALL(_ksem_getvalue)(long long res, long long id_, void *value_) { + /* Nothing to do */ +} +PRE_SYSCALL(_ksem_destroy)(long long id_) { /* Nothing to do */ } +POST_SYSCALL(_ksem_destroy)(long long res, long long id_) { + /* Nothing to do */ +} +PRE_SYSCALL(_ksem_timedwait)(long long id_, void *abstime_) { + if (abstime_) { + PRE_READ(abstime_, struct_timespec_sz); + } +} +POST_SYSCALL(_ksem_timedwait)(long long res, long long id_, void *abstime_) {} +PRE_SYSCALL(mq_open) +(void *name_, long long oflag_, long long mode_, void *attr_) { + const char *name = (const char *)name_; + if (name) { + PRE_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +POST_SYSCALL(mq_open) +(long long res, void *name_, long long oflag_, long long mode_, void *attr_) { + const char *name = (const 
char *)name_; + if (name) { + POST_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +PRE_SYSCALL(mq_close)(long long mqdes_) { /* Nothing to do */ } +POST_SYSCALL(mq_close)(long long res, long long mqdes_) { /* Nothing to do */ } +PRE_SYSCALL(mq_unlink)(void *name_) { + const char *name = (const char *)name_; + if (name) { + PRE_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +POST_SYSCALL(mq_unlink)(long long res, void *name_) { + const char *name = (const char *)name_; + if (name) { + POST_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +PRE_SYSCALL(mq_getattr)(long long mqdes_, void *mqstat_) { /* Nothing to do */ } +POST_SYSCALL(mq_getattr)(long long res, long long mqdes_, void *mqstat_) { + /* Nothing to do */ +} +PRE_SYSCALL(mq_setattr)(long long mqdes_, void *mqstat_, void *omqstat_) { + if (mqstat_) { + PRE_READ(mqstat_, struct_mq_attr_sz); + } +} +POST_SYSCALL(mq_setattr) +(long long res, long long mqdes_, void *mqstat_, void *omqstat_) {} +PRE_SYSCALL(mq_notify)(long long mqdes_, void *notification_) { + if (notification_) { + PRE_READ(notification_, struct_sigevent_sz); + } +} +POST_SYSCALL(mq_notify)(long long res, long long mqdes_, void *notification_) {} +PRE_SYSCALL(mq_send) +(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_) { + if (msg_ptr_) { + PRE_READ(msg_ptr_, msg_len_); + } +} +POST_SYSCALL(mq_send) +(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_, + long long msg_prio_) {} +PRE_SYSCALL(mq_receive) +(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_) { + /* Nothing to do */ +} +POST_SYSCALL(mq_receive) +(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_, + void *msg_prio_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_50_mq_timedsend) +(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_, + void *abs_timeout_) { + /* TODO */ +} +POST_SYSCALL(compat_50_mq_timedsend) +(long long res, long long mqdes_, void 
*msg_ptr_, long long msg_len_, + long long msg_prio_, void *abs_timeout_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_mq_timedreceive) +(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_, + void *abs_timeout_) { + /* TODO */ +} +POST_SYSCALL(compat_50_mq_timedreceive) +(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_, + void *msg_prio_, void *abs_timeout_) { + /* TODO */ +} +/* syscall 267 has been skipped */ +/* syscall 268 has been skipped */ +/* syscall 269 has been skipped */ +PRE_SYSCALL(__posix_rename)(void *from_, void *to_) { + const char *from = (const char *)from_; + const char *to = (const char *)to_; + if (from_) { + PRE_READ(from, __sanitizer::internal_strlen(from) + 1); + } + if (to) { + PRE_READ(to, __sanitizer::internal_strlen(to) + 1); + } +} +POST_SYSCALL(__posix_rename)(long long res, void *from_, void *to_) { + const char *from = (const char *)from_; + const char *to = (const char *)to_; + if (from) { + POST_READ(from, __sanitizer::internal_strlen(from) + 1); + } + if (to) { + POST_READ(to, __sanitizer::internal_strlen(to) + 1); + } +} +PRE_SYSCALL(swapctl)(long long cmd_, void *arg_, long long misc_) { /* TODO */ } +POST_SYSCALL(swapctl) +(long long res, long long cmd_, void *arg_, long long misc_) { + /* TODO */ +} +PRE_SYSCALL(compat_30_getdents)(long long fd_, void *buf_, long long count_) { + /* TODO */ +} +POST_SYSCALL(compat_30_getdents) +(long long res, long long fd_, void *buf_, long long count_) { + /* TODO */ +} +PRE_SYSCALL(minherit)(void *addr_, long long len_, long long inherit_) { + /* Nothing to do */ +} +POST_SYSCALL(minherit) +(long long res, void *addr_, long long len_, long long inherit_) { + /* Nothing to do */ +} +PRE_SYSCALL(lchmod)(void *path_, long long mode_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(lchmod)(long long res, void *path_, long long mode_) { + const char *path = (const char 
*)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(lchown)(void *path_, long long uid_, long long gid_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(lchown) +(long long res, void *path_, long long uid_, long long gid_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(compat_50_lutimes)(void *path_, void *tptr_) { /* TODO */ } +POST_SYSCALL(compat_50_lutimes)(long long res, void *path_, void *tptr_) { + /* TODO */ +} +PRE_SYSCALL(__msync13)(void *addr_, long long len_, long long flags_) { + /* Nothing to do */ +} +POST_SYSCALL(__msync13) +(long long res, void *addr_, long long len_, long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_30___stat13)(void *path_, void *ub_) { /* TODO */ } +POST_SYSCALL(compat_30___stat13)(long long res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(compat_30___fstat13)(long long fd_, void *sb_) { /* TODO */ } +POST_SYSCALL(compat_30___fstat13)(long long res, long long fd_, void *sb_) { + /* TODO */ +} +PRE_SYSCALL(compat_30___lstat13)(void *path_, void *ub_) { /* TODO */ } +POST_SYSCALL(compat_30___lstat13)(long long res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(__sigaltstack14)(void *nss_, void *oss_) { + if (nss_) { + PRE_READ(nss_, struct_sigaltstack_sz); + } + if (oss_) { + PRE_READ(oss_, struct_sigaltstack_sz); + } +} +POST_SYSCALL(__sigaltstack14)(long long res, void *nss_, void *oss_) {} +PRE_SYSCALL(__vfork14)(void) { /* Nothing to do */ } +POST_SYSCALL(__vfork14)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(__posix_chown)(void *path_, long long uid_, long long gid_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(__posix_chown) +(long long res, void *path_, long long 
uid_, long long gid_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(__posix_fchown)(long long fd_, long long uid_, long long gid_) { + /* Nothing to do */ +} +POST_SYSCALL(__posix_fchown) +(long long res, long long fd_, long long uid_, long long gid_) { + /* Nothing to do */ +} +PRE_SYSCALL(__posix_lchown)(void *path_, long long uid_, long long gid_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(__posix_lchown) +(long long res, void *path_, long long uid_, long long gid_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(getsid)(long long pid_) { /* Nothing to do */ } +POST_SYSCALL(getsid)(long long res, long long pid_) { /* Nothing to do */ } +PRE_SYSCALL(__clone)(long long flags_, void *stack_) { /* Nothing to do */ } +POST_SYSCALL(__clone)(long long res, long long flags_, void *stack_) { + /* Nothing to do */ +} +PRE_SYSCALL(fktrace) +(long long fd_, long long ops_, long long facs_, long long pid_) { + /* Nothing to do */ +} +POST_SYSCALL(fktrace) +(long long res, long long fd_, long long ops_, long long facs_, + long long pid_) { + /* Nothing to do */ +} +PRE_SYSCALL(preadv) +(long long fd_, void *iovp_, long long iovcnt_, long long PAD_, + long long offset_) { + /* Nothing to do */ +} +POST_SYSCALL(preadv) +(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_, + long long offset_) { + /* Nothing to do */ +} +PRE_SYSCALL(pwritev) +(long long fd_, void *iovp_, long long iovcnt_, long long PAD_, + long long offset_) { + /* Nothing to do */ +} +POST_SYSCALL(pwritev) +(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_, + long long offset_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_16___sigaction14) +(long long signum_, void *nsa_, void *osa_) { 
+ /* TODO */ +} +POST_SYSCALL(compat_16___sigaction14) +(long long res, long long signum_, void *nsa_, void *osa_) { + /* TODO */ +} +PRE_SYSCALL(__sigpending14)(void *set_) { /* Nothing to do */ } +POST_SYSCALL(__sigpending14)(long long res, void *set_) { /* Nothing to do */ } +PRE_SYSCALL(__sigprocmask14)(long long how_, void *set_, void *oset_) { + /* Nothing to do */ +} +POST_SYSCALL(__sigprocmask14) +(long long res, long long how_, void *set_, void *oset_) { + /* Nothing to do */ +} +PRE_SYSCALL(__sigsuspend14)(void *set_) { + if (set_) { + PRE_READ(set_, sizeof(__sanitizer_sigset_t)); + } +} +POST_SYSCALL(__sigsuspend14)(long long res, void *set_) { + if (set_) { + PRE_READ(set_, sizeof(__sanitizer_sigset_t)); + } +} +PRE_SYSCALL(compat_16___sigreturn14)(void *sigcntxp_) { /* TODO */ } +POST_SYSCALL(compat_16___sigreturn14)(long long res, void *sigcntxp_) { + /* TODO */ +} +PRE_SYSCALL(__getcwd)(void *bufp_, long long length_) { /* Nothing to do */ } +POST_SYSCALL(__getcwd)(long long res, void *bufp_, long long length_) { + /* Nothing to do */ +} +PRE_SYSCALL(fchroot)(long long fd_) { /* Nothing to do */ } +POST_SYSCALL(fchroot)(long long res, long long fd_) { /* Nothing to do */ } +PRE_SYSCALL(compat_30_fhopen)(void *fhp_, long long flags_) { /* TODO */ } +POST_SYSCALL(compat_30_fhopen)(long long res, void *fhp_, long long flags_) { + /* TODO */ +} +PRE_SYSCALL(compat_30_fhstat)(void *fhp_, void *sb_) { /* TODO */ } +POST_SYSCALL(compat_30_fhstat)(long long res, void *fhp_, void *sb_) { + /* TODO */ +} +PRE_SYSCALL(compat_20_fhstatfs)(void *fhp_, void *buf_) { /* TODO */ } +POST_SYSCALL(compat_20_fhstatfs)(long long res, void *fhp_, void *buf_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_____semctl13) +(long long semid_, long long semnum_, long long cmd_, void *arg_) { + /* TODO */ +} +POST_SYSCALL(compat_50_____semctl13) +(long long res, long long semid_, long long semnum_, long long cmd_, + void *arg_) { + /* TODO */ +} +PRE_SYSCALL(compat_50___msgctl13) 
+(long long msqid_, long long cmd_, void *buf_) { + /* TODO */ +} +POST_SYSCALL(compat_50___msgctl13) +(long long res, long long msqid_, long long cmd_, void *buf_) { + /* TODO */ +} +PRE_SYSCALL(compat_50___shmctl13) +(long long shmid_, long long cmd_, void *buf_) { + /* TODO */ +} +POST_SYSCALL(compat_50___shmctl13) +(long long res, long long shmid_, long long cmd_, void *buf_) { + /* TODO */ +} +PRE_SYSCALL(lchflags)(void *path_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(lchflags)(long long res, void *path_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(issetugid)(void) { /* Nothing to do */ } +POST_SYSCALL(issetugid)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(utrace)(void *label_, void *addr_, long long len_) { + const char *label = (const char *)label_; + if (label) { + PRE_READ(label, __sanitizer::internal_strlen(label) + 1); + } + if (addr_) { + PRE_READ(addr_, len_); + } +} +POST_SYSCALL(utrace)(long long res, void *label_, void *addr_, long long len_) { + const char *label = (const char *)label_; + if (label) { + POST_READ(label, __sanitizer::internal_strlen(label) + 1); + } + if (addr_) { + POST_READ(addr_, len_); + } +} +PRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ } +POST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ } +PRE_SYSCALL(setcontext)(void *ucp_) { + if (ucp_) { + PRE_READ(ucp_, ucontext_t_sz); + } +} +POST_SYSCALL(setcontext)(long long res, void *ucp_) {} +PRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) { + if (ucp_) { + PRE_READ(ucp_, ucontext_t_sz); + } +} +POST_SYSCALL(_lwp_create) +(long long res, void *ucp_, long long flags_, void *new_lwp_) {} +PRE_SYSCALL(_lwp_exit)(void) { /* Nothing to do */ } +POST_SYSCALL(_lwp_exit)(long long res) { /* Nothing to 
do */ } +PRE_SYSCALL(_lwp_self)(void) { /* Nothing to do */ } +POST_SYSCALL(_lwp_self)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(_lwp_wait)(long long wait_for_, void *departed_) { + /* Nothing to do */ +} +POST_SYSCALL(_lwp_wait)(long long res, long long wait_for_, void *departed_) { + /* Nothing to do */ +} +PRE_SYSCALL(_lwp_suspend)(long long target_) { /* Nothing to do */ } +POST_SYSCALL(_lwp_suspend)(long long res, long long target_) { + /* Nothing to do */ +} +PRE_SYSCALL(_lwp_continue)(long long target_) { /* Nothing to do */ } +POST_SYSCALL(_lwp_continue)(long long res, long long target_) { + /* Nothing to do */ +} +PRE_SYSCALL(_lwp_wakeup)(long long target_) { /* Nothing to do */ } +POST_SYSCALL(_lwp_wakeup)(long long res, long long target_) { + /* Nothing to do */ +} +PRE_SYSCALL(_lwp_getprivate)(void) { /* Nothing to do */ } +POST_SYSCALL(_lwp_getprivate)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(_lwp_setprivate)(void *ptr_) { /* Nothing to do */ } +POST_SYSCALL(_lwp_setprivate)(long long res, void *ptr_) { /* Nothing to do */ } +PRE_SYSCALL(_lwp_kill)(long long target_, long long signo_) { + /* Nothing to do */ +} +POST_SYSCALL(_lwp_kill)(long long res, long long target_, long long signo_) { + /* Nothing to do */ +} +PRE_SYSCALL(_lwp_detach)(long long target_) { /* Nothing to do */ } +POST_SYSCALL(_lwp_detach)(long long res, long long target_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_50__lwp_park) +(void *ts_, long long unpark_, void *hint_, void *unparkhint_) { + /* TODO */ +} +POST_SYSCALL(compat_50__lwp_park) +(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) { + /* TODO */ +} +PRE_SYSCALL(_lwp_unpark)(long long target_, void *hint_) { /* Nothing to do */ } +POST_SYSCALL(_lwp_unpark)(long long res, long long target_, void *hint_) { + /* Nothing to do */ +} +PRE_SYSCALL(_lwp_unpark_all)(void *targets_, long long ntargets_, void *hint_) { + if (targets_) { + PRE_READ(targets_, ntargets_ * 
sizeof(__sanitizer_lwpid_t)); + } +} +POST_SYSCALL(_lwp_unpark_all) +(long long res, void *targets_, long long ntargets_, void *hint_) {} +PRE_SYSCALL(_lwp_setname)(long long target_, void *name_) { + const char *name = (const char *)name_; + if (name) { + PRE_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +POST_SYSCALL(_lwp_setname)(long long res, long long target_, void *name_) { + const char *name = (const char *)name_; + if (name) { + POST_READ(name, __sanitizer::internal_strlen(name) + 1); + } +} +PRE_SYSCALL(_lwp_getname)(long long target_, void *name_, long long len_) { + /* Nothing to do */ +} +POST_SYSCALL(_lwp_getname) +(long long res, long long target_, void *name_, long long len_) { + /* Nothing to do */ +} +PRE_SYSCALL(_lwp_ctl)(long long features_, void **address_) { + /* Nothing to do */ +} +POST_SYSCALL(_lwp_ctl)(long long res, long long features_, void **address_) { + /* Nothing to do */ +} +/* syscall 326 has been skipped */ +/* syscall 327 has been skipped */ +/* syscall 328 has been skipped */ +/* syscall 329 has been skipped */ +PRE_SYSCALL(compat_60_sa_register) +(void *newv_, void **oldv_, long long flags_, long long stackinfo_offset_) { + /* TODO */ +} +POST_SYSCALL(compat_60_sa_register) +(long long res, void *newv_, void **oldv_, long long flags_, + long long stackinfo_offset_) { + /* TODO */ +} +PRE_SYSCALL(compat_60_sa_stacks)(long long num_, void *stacks_) { /* TODO */ } +POST_SYSCALL(compat_60_sa_stacks) +(long long res, long long num_, void *stacks_) { + /* TODO */ +} +PRE_SYSCALL(compat_60_sa_enable)(void) { /* TODO */ } +POST_SYSCALL(compat_60_sa_enable)(long long res) { /* TODO */ } +PRE_SYSCALL(compat_60_sa_setconcurrency)(long long concurrency_) { /* TODO */ } +POST_SYSCALL(compat_60_sa_setconcurrency) +(long long res, long long concurrency_) { + /* TODO */ +} +PRE_SYSCALL(compat_60_sa_yield)(void) { /* TODO */ } +POST_SYSCALL(compat_60_sa_yield)(long long res) { /* TODO */ } +PRE_SYSCALL(compat_60_sa_preempt)(long 
long sa_id_) { /* TODO */ } +POST_SYSCALL(compat_60_sa_preempt)(long long res, long long sa_id_) { + /* TODO */ +} +/* syscall 336 has been skipped */ +/* syscall 337 has been skipped */ +/* syscall 338 has been skipped */ +/* syscall 339 has been skipped */ +PRE_SYSCALL(__sigaction_sigtramp) +(long long signum_, void *nsa_, void *osa_, void *tramp_, long long vers_) { + if (nsa_) { + PRE_READ(nsa_, sizeof(__sanitizer_sigaction)); + } +} +POST_SYSCALL(__sigaction_sigtramp) +(long long res, long long signum_, void *nsa_, void *osa_, void *tramp_, + long long vers_) { + if (nsa_) { + PRE_READ(nsa_, sizeof(__sanitizer_sigaction)); + } +} +PRE_SYSCALL(pmc_get_info)(long long ctr_, long long op_, void *args_) { + /* TODO */ +} +POST_SYSCALL(pmc_get_info) +(long long res, long long ctr_, long long op_, void *args_) { + /* TODO */ +} +PRE_SYSCALL(pmc_control)(long long ctr_, long long op_, void *args_) { + /* TODO */ +} +POST_SYSCALL(pmc_control) +(long long res, long long ctr_, long long op_, void *args_) { + /* TODO */ +} +PRE_SYSCALL(rasctl)(void *addr_, long long len_, long long op_) { + /* Nothing to do */ +} +POST_SYSCALL(rasctl) +(long long res, void *addr_, long long len_, long long op_) { + /* Nothing to do */ +} +PRE_SYSCALL(kqueue)(void) { /* Nothing to do */ } +POST_SYSCALL(kqueue)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(compat_50_kevent) +(long long fd_, void *changelist_, long long nchanges_, void *eventlist_, + long long nevents_, void *timeout_) { + /* TODO */ +} +POST_SYSCALL(compat_50_kevent) +(long long res, long long fd_, void *changelist_, long long nchanges_, + void *eventlist_, long long nevents_, void *timeout_) { + /* TODO */ +} +PRE_SYSCALL(_sched_setparam) +(long long pid_, long long lid_, long long policy_, void *params_) { + if (params_) { + PRE_READ(params_, struct_sched_param_sz); + } +} +POST_SYSCALL(_sched_setparam) +(long long res, long long pid_, long long lid_, long long policy_, + void *params_) { + if (params_) { + 
PRE_READ(params_, struct_sched_param_sz); + } +} +PRE_SYSCALL(_sched_getparam) +(long long pid_, long long lid_, void *policy_, void *params_) { + /* Nothing to do */ +} +POST_SYSCALL(_sched_getparam) +(long long res, long long pid_, long long lid_, void *policy_, void *params_) { + /* Nothing to do */ +} +PRE_SYSCALL(_sched_setaffinity) +(long long pid_, long long lid_, long long size_, void *cpuset_) { + if (cpuset_) { + PRE_READ(cpuset_, size_); + } +} +POST_SYSCALL(_sched_setaffinity) +(long long res, long long pid_, long long lid_, long long size_, + void *cpuset_) { + if (cpuset_) { + PRE_READ(cpuset_, size_); + } +} +PRE_SYSCALL(_sched_getaffinity) +(long long pid_, long long lid_, long long size_, void *cpuset_) { + /* Nothing to do */ +} +POST_SYSCALL(_sched_getaffinity) +(long long res, long long pid_, long long lid_, long long size_, + void *cpuset_) { + /* Nothing to do */ +} +PRE_SYSCALL(sched_yield)(void) { /* Nothing to do */ } +POST_SYSCALL(sched_yield)(long long res) { /* Nothing to do */ } +PRE_SYSCALL(_sched_protect)(long long priority_) { /* Nothing to do */ } +POST_SYSCALL(_sched_protect)(long long res, long long priority_) { + /* Nothing to do */ +} +/* syscall 352 has been skipped */ +/* syscall 353 has been skipped */ +PRE_SYSCALL(fsync_range) +(long long fd_, long long flags_, long long start_, long long length_) { + /* Nothing to do */ +} +POST_SYSCALL(fsync_range) +(long long res, long long fd_, long long flags_, long long start_, + long long length_) { + /* Nothing to do */ +} +PRE_SYSCALL(uuidgen)(void *store_, long long count_) { /* Nothing to do */ } +POST_SYSCALL(uuidgen)(long long res, void *store_, long long count_) { + /* Nothing to do */ +} +PRE_SYSCALL(getvfsstat)(void *buf_, long long bufsize_, long long flags_) { + /* Nothing to do */ +} +POST_SYSCALL(getvfsstat) +(long long res, void *buf_, long long bufsize_, long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(statvfs1)(void *path_, void *buf_, long long flags_) { + 
const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(statvfs1) +(long long res, void *path_, void *buf_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(fstatvfs1)(long long fd_, void *buf_, long long flags_) { + /* Nothing to do */ +} +POST_SYSCALL(fstatvfs1) +(long long res, long long fd_, void *buf_, long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(compat_30_fhstatvfs1)(void *fhp_, void *buf_, long long flags_) { + /* TODO */ +} +POST_SYSCALL(compat_30_fhstatvfs1) +(long long res, void *fhp_, void *buf_, long long flags_) { + /* TODO */ +} +PRE_SYSCALL(extattrctl) +(void *path_, long long cmd_, void *filename_, long long attrnamespace_, + void *attrname_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(extattrctl) +(long long res, void *path_, long long cmd_, void *filename_, + long long attrnamespace_, void *attrname_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_set_file) +(void *path_, long long attrnamespace_, void *attrname_, void *data_, + long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(extattr_set_file) +(long long res, void *path_, long long attrnamespace_, void *attrname_, + void *data_, long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_get_file) +(void *path_, long long attrnamespace_, void *attrname_, void *data_, + long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} 
+POST_SYSCALL(extattr_get_file) +(long long res, void *path_, long long attrnamespace_, void *attrname_, + void *data_, long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_delete_file) +(void *path_, long long attrnamespace_, void *attrname_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(extattr_delete_file) +(long long res, void *path_, long long attrnamespace_, void *attrname_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_set_fd) +(long long fd_, long long attrnamespace_, void *attrname_, void *data_, + long long nbytes_) { + /* TODO */ +} +POST_SYSCALL(extattr_set_fd) +(long long res, long long fd_, long long attrnamespace_, void *attrname_, + void *data_, long long nbytes_) { + /* TODO */ +} +PRE_SYSCALL(extattr_get_fd) +(long long fd_, long long attrnamespace_, void *attrname_, void *data_, + long long nbytes_) { + /* TODO */ +} +POST_SYSCALL(extattr_get_fd) +(long long res, long long fd_, long long attrnamespace_, void *attrname_, + void *data_, long long nbytes_) { + /* TODO */ +} +PRE_SYSCALL(extattr_delete_fd) +(long long fd_, long long attrnamespace_, void *attrname_) { + /* TODO */ +} +POST_SYSCALL(extattr_delete_fd) +(long long res, long long fd_, long long attrnamespace_, void *attrname_) { + /* TODO */ +} +PRE_SYSCALL(extattr_set_link) +(void *path_, long long attrnamespace_, void *attrname_, void *data_, + long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(extattr_set_link) +(long long res, void *path_, long long attrnamespace_, void *attrname_, + void *data_, long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + 
POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_get_link) +(void *path_, long long attrnamespace_, void *attrname_, void *data_, + long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(extattr_get_link) +(long long res, void *path_, long long attrnamespace_, void *attrname_, + void *data_, long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_delete_link) +(void *path_, long long attrnamespace_, void *attrname_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(extattr_delete_link) +(long long res, void *path_, long long attrnamespace_, void *attrname_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_list_fd) +(long long fd_, long long attrnamespace_, void *data_, long long nbytes_) { + /* TODO */ +} +POST_SYSCALL(extattr_list_fd) +(long long res, long long fd_, long long attrnamespace_, void *data_, + long long nbytes_) { + /* TODO */ +} +PRE_SYSCALL(extattr_list_file) +(void *path_, long long attrnamespace_, void *data_, long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(extattr_list_file) +(long long res, void *path_, long long attrnamespace_, void *data_, + long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(extattr_list_link) +(void *path_, long long attrnamespace_, void *data_, long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} 
+POST_SYSCALL(extattr_list_link) +(long long res, void *path_, long long attrnamespace_, void *data_, + long long nbytes_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(compat_50_pselect) +(long long nd_, void *in_, void *ou_, void *ex_, void *ts_, void *mask_) { + /* TODO */ +} +POST_SYSCALL(compat_50_pselect) +(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_, + void *mask_) { + /* TODO */ +} +PRE_SYSCALL(compat_50_pollts) +(void *fds_, long long nfds_, void *ts_, void *mask_) { + /* TODO */ +} +POST_SYSCALL(compat_50_pollts) +(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) { + /* TODO */ +} +PRE_SYSCALL(setxattr) +(void *path_, void *name_, void *value_, long long size_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(setxattr) +(long long res, void *path_, void *name_, void *value_, long long size_, + long long flags_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(lsetxattr) +(void *path_, void *name_, void *value_, long long size_, long long flags_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(lsetxattr) +(long long res, void *path_, void *name_, void *value_, long long size_, + long long flags_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(fsetxattr) +(long long fd_, void *name_, void *value_, long long size_, long long flags_) { + /* Nothing to do */ +} +POST_SYSCALL(fsetxattr) +(long long res, long long fd_, void *name_, void *value_, long long size_, + long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(getxattr)(void *path_, void *name_, 
void *value_, long long size_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(getxattr) +(long long res, void *path_, void *name_, void *value_, long long size_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(lgetxattr) +(void *path_, void *name_, void *value_, long long size_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(lgetxattr) +(long long res, void *path_, void *name_, void *value_, long long size_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(fgetxattr) +(long long fd_, void *name_, void *value_, long long size_) { + /* Nothing to do */ +} +POST_SYSCALL(fgetxattr) +(long long res, long long fd_, void *name_, void *value_, long long size_) { + /* Nothing to do */ +} +PRE_SYSCALL(listxattr)(void *path_, void *list_, long long size_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(listxattr) +(long long res, void *path_, void *list_, long long size_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(llistxattr)(void *path_, void *list_, long long size_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(llistxattr) +(long long res, void *path_, void *list_, long long size_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(flistxattr)(long long fd_, void *list_, long long size_) { + /* TODO */ +} +POST_SYSCALL(flistxattr) +(long long res, long long fd_, void 
*list_, long long size_) { + /* TODO */ +} +PRE_SYSCALL(removexattr)(void *path_, void *name_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(removexattr)(long long res, void *path_, void *name_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(lremovexattr)(void *path_, void *name_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(lremovexattr)(long long res, void *path_, void *name_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(fremovexattr)(long long fd_, void *name_) { /* TODO */ } +POST_SYSCALL(fremovexattr)(long long res, long long fd_, void *name_) { + /* TODO */ +} +PRE_SYSCALL(compat_50___stat30)(void *path_, void *ub_) { /* TODO */ } +POST_SYSCALL(compat_50___stat30)(long long res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(compat_50___fstat30)(long long fd_, void *sb_) { /* TODO */ } +POST_SYSCALL(compat_50___fstat30)(long long res, long long fd_, void *sb_) { + /* TODO */ +} +PRE_SYSCALL(compat_50___lstat30)(void *path_, void *ub_) { /* TODO */ } +POST_SYSCALL(compat_50___lstat30)(long long res, void *path_, void *ub_) { + /* TODO */ +} +PRE_SYSCALL(__getdents30)(long long fd_, void *buf_, long long count_) { + /* Nothing to do */ +} +POST_SYSCALL(__getdents30) +(long long res, long long fd_, void *buf_, long long count_) { + /* Nothing to do */ +} +PRE_SYSCALL(posix_fadvise)(long long) { /* Nothing to do */ } +POST_SYSCALL(posix_fadvise)(long long res, long long) { /* Nothing to do */ } +PRE_SYSCALL(compat_30___fhstat30)(void *fhp_, void *sb_) { /* TODO */ } +POST_SYSCALL(compat_30___fhstat30)(long long res, void *fhp_, void *sb_) { + /* TODO */ +} 
+PRE_SYSCALL(compat_50___ntp_gettime30)(void *ntvp_) { /* TODO */ } +POST_SYSCALL(compat_50___ntp_gettime30)(long long res, void *ntvp_) { + /* TODO */ +} +PRE_SYSCALL(__socket30) +(long long domain_, long long type_, long long protocol_) { + /* Nothing to do */ +} +POST_SYSCALL(__socket30) +(long long res, long long domain_, long long type_, long long protocol_) { + /* Nothing to do */ +} +PRE_SYSCALL(__getfh30)(void *fname_, void *fhp_, void *fh_size_) { + const char *fname = (const char *)fname_; + if (fname) { + PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1); + } +} +POST_SYSCALL(__getfh30) +(long long res, void *fname_, void *fhp_, void *fh_size_) { + const char *fname = (const char *)fname_; + if (res == 0) { + if (fname) { + POST_READ(fname, __sanitizer::internal_strlen(fname) + 1); + } + } +} +PRE_SYSCALL(__fhopen40)(void *fhp_, long long fh_size_, long long flags_) { + if (fhp_) { + PRE_READ(fhp_, fh_size_); + } +} +POST_SYSCALL(__fhopen40) +(long long res, void *fhp_, long long fh_size_, long long flags_) {} +PRE_SYSCALL(__fhstatvfs140) +(void *fhp_, long long fh_size_, void *buf_, long long flags_) { + if (fhp_) { + PRE_READ(fhp_, fh_size_); + } +} +POST_SYSCALL(__fhstatvfs140) +(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {} +PRE_SYSCALL(compat_50___fhstat40)(void *fhp_, long long fh_size_, void *sb_) { + if (fhp_) { + PRE_READ(fhp_, fh_size_); + } +} +POST_SYSCALL(compat_50___fhstat40) +(long long res, void *fhp_, long long fh_size_, void *sb_) {} +PRE_SYSCALL(aio_cancel)(long long fildes_, void *aiocbp_) { + if (aiocbp_) { + PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb)); + } +} +POST_SYSCALL(aio_cancel)(long long res, long long fildes_, void *aiocbp_) {} +PRE_SYSCALL(aio_error)(void *aiocbp_) { + if (aiocbp_) { + PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb)); + } +} +POST_SYSCALL(aio_error)(long long res, void *aiocbp_) {} +PRE_SYSCALL(aio_fsync)(long long op_, void *aiocbp_) { + if (aiocbp_) { + 
PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb)); + } +} +POST_SYSCALL(aio_fsync)(long long res, long long op_, void *aiocbp_) {} +PRE_SYSCALL(aio_read)(void *aiocbp_) { + if (aiocbp_) { + PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb)); + } +} +POST_SYSCALL(aio_read)(long long res, void *aiocbp_) {} +PRE_SYSCALL(aio_return)(void *aiocbp_) { + if (aiocbp_) { + PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb)); + } +} +POST_SYSCALL(aio_return)(long long res, void *aiocbp_) {} +PRE_SYSCALL(compat_50_aio_suspend) +(void *list_, long long nent_, void *timeout_) { + /* TODO */ +} +POST_SYSCALL(compat_50_aio_suspend) +(long long res, void *list_, long long nent_, void *timeout_) { + /* TODO */ +} +PRE_SYSCALL(aio_write)(void *aiocbp_) { + if (aiocbp_) { + PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb)); + } +} +POST_SYSCALL(aio_write)(long long res, void *aiocbp_) {} +PRE_SYSCALL(lio_listio) +(long long mode_, void *list_, long long nent_, void *sig_) { + /* Nothing to do */ +} +POST_SYSCALL(lio_listio) +(long long res, long long mode_, void *list_, long long nent_, void *sig_) { + /* Nothing to do */ +} +/* syscall 407 has been skipped */ +/* syscall 408 has been skipped */ +/* syscall 409 has been skipped */ +PRE_SYSCALL(__mount50) +(void *type_, void *path_, long long flags_, void *data_, long long data_len_) { + const char *type = (const char *)type_; + const char *path = (const char *)path_; + if (type) { + PRE_READ(type, __sanitizer::internal_strlen(type) + 1); + } + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (data_) { + PRE_READ(data_, data_len_); + } +} +POST_SYSCALL(__mount50) +(long long res, void *type_, void *path_, long long flags_, void *data_, + long long data_len_) { + const char *type = (const char *)type_; + const char *path = (const char *)path_; + if (type) { + POST_READ(type, __sanitizer::internal_strlen(type) + 1); + } + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if 
(data_) { + POST_READ(data_, data_len_); + } +} +PRE_SYSCALL(mremap) +(void *old_address_, long long old_size_, void *new_address_, + long long new_size_, long long flags_) { + /* Nothing to do */ +} +POST_SYSCALL(mremap) +(long long res, void *old_address_, long long old_size_, void *new_address_, + long long new_size_, long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(pset_create)(void *psid_) { /* Nothing to do */ } +POST_SYSCALL(pset_create)(long long res, void *psid_) { /* Nothing to do */ } +PRE_SYSCALL(pset_destroy)(long long psid_) { /* Nothing to do */ } +POST_SYSCALL(pset_destroy)(long long res, long long psid_) { + /* Nothing to do */ +} +PRE_SYSCALL(pset_assign)(long long psid_, long long cpuid_, void *opsid_) { + /* Nothing to do */ +} +POST_SYSCALL(pset_assign) +(long long res, long long psid_, long long cpuid_, void *opsid_) { + /* Nothing to do */ +} +PRE_SYSCALL(_pset_bind) +(long long idtype_, long long first_id_, long long second_id_, long long psid_, + void *opsid_) { + /* Nothing to do */ +} +POST_SYSCALL(_pset_bind) +(long long res, long long idtype_, long long first_id_, long long second_id_, + long long psid_, void *opsid_) { + /* Nothing to do */ +} +PRE_SYSCALL(__posix_fadvise50) +(long long fd_, long long PAD_, long long offset_, long long len_, + long long advice_) { + /* Nothing to do */ +} +POST_SYSCALL(__posix_fadvise50) +(long long res, long long fd_, long long PAD_, long long offset_, + long long len_, long long advice_) { + /* Nothing to do */ +} +PRE_SYSCALL(__select50) +(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) { + /* Nothing to do */ +} +POST_SYSCALL(__select50) +(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) { + /* Nothing to do */ +} +PRE_SYSCALL(__gettimeofday50)(void *tp_, void *tzp_) { /* Nothing to do */ } +POST_SYSCALL(__gettimeofday50)(long long res, void *tp_, void *tzp_) { + /* Nothing to do */ +} +PRE_SYSCALL(__settimeofday50)(void *tv_, void *tzp_) { + if (tv_) { 
+ PRE_READ(tv_, timeval_sz); + } + if (tzp_) { + PRE_READ(tzp_, struct_timezone_sz); + } +} +POST_SYSCALL(__settimeofday50)(long long res, void *tv_, void *tzp_) {} +PRE_SYSCALL(__utimes50)(void *path_, void *tptr_) { + struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_; + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (tptr) { + PRE_READ(tptr[0], struct_timespec_sz); + PRE_READ(tptr[1], struct_timespec_sz); + } +} +POST_SYSCALL(__utimes50)(long long res, void *path_, void *tptr_) {} +PRE_SYSCALL(__adjtime50)(void *delta_, void *olddelta_) { + if (delta_) { + PRE_READ(delta_, timeval_sz); + } +} +POST_SYSCALL(__adjtime50)(long long res, void *delta_, void *olddelta_) {} +PRE_SYSCALL(__lfs_segwait50)(void *fsidp_, void *tv_) { /* TODO */ } +POST_SYSCALL(__lfs_segwait50)(long long res, void *fsidp_, void *tv_) { + /* TODO */ +} +PRE_SYSCALL(__futimes50)(long long fd_, void *tptr_) { + struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_; + if (tptr) { + PRE_READ(tptr[0], struct_timespec_sz); + PRE_READ(tptr[1], struct_timespec_sz); + } +} +POST_SYSCALL(__futimes50)(long long res, long long fd_, void *tptr_) {} +PRE_SYSCALL(__lutimes50)(void *path_, void *tptr_) { + struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_; + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (tptr) { + PRE_READ(tptr[0], struct_timespec_sz); + PRE_READ(tptr[1], struct_timespec_sz); + } +} +POST_SYSCALL(__lutimes50)(long long res, void *path_, void *tptr_) { + struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_; + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (tptr) { + POST_READ(tptr[0], struct_timespec_sz); + POST_READ(tptr[1], struct_timespec_sz); + } +} 
+PRE_SYSCALL(__setitimer50)(long long which_, void *itv_, void *oitv_) { + struct __sanitizer_itimerval *itv = (struct __sanitizer_itimerval *)itv_; + if (itv) { + PRE_READ(&itv->it_interval.tv_sec, sizeof(__sanitizer_time_t)); + PRE_READ(&itv->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t)); + PRE_READ(&itv->it_value.tv_sec, sizeof(__sanitizer_time_t)); + PRE_READ(&itv->it_value.tv_usec, sizeof(__sanitizer_suseconds_t)); + } +} +POST_SYSCALL(__setitimer50) +(long long res, long long which_, void *itv_, void *oitv_) {} +PRE_SYSCALL(__getitimer50)(long long which_, void *itv_) { /* Nothing to do */ } +POST_SYSCALL(__getitimer50)(long long res, long long which_, void *itv_) { + /* Nothing to do */ +} +PRE_SYSCALL(__clock_gettime50)(long long clock_id_, void *tp_) { + /* Nothing to do */ +} +POST_SYSCALL(__clock_gettime50)(long long res, long long clock_id_, void *tp_) { + /* Nothing to do */ +} +PRE_SYSCALL(__clock_settime50)(long long clock_id_, void *tp_) { + if (tp_) { + PRE_READ(tp_, struct_timespec_sz); + } +} +POST_SYSCALL(__clock_settime50) +(long long res, long long clock_id_, void *tp_) {} +PRE_SYSCALL(__clock_getres50)(long long clock_id_, void *tp_) { + /* Nothing to do */ +} +POST_SYSCALL(__clock_getres50)(long long res, long long clock_id_, void *tp_) { + /* Nothing to do */ +} +PRE_SYSCALL(__nanosleep50)(void *rqtp_, void *rmtp_) { + if (rqtp_) { + PRE_READ(rqtp_, struct_timespec_sz); + } +} +POST_SYSCALL(__nanosleep50)(long long res, void *rqtp_, void *rmtp_) {} +PRE_SYSCALL(____sigtimedwait50)(void *set_, void *info_, void *timeout_) { + if (set_) { + PRE_READ(set_, sizeof(__sanitizer_sigset_t)); + } + if (timeout_) { + PRE_READ(timeout_, struct_timespec_sz); + } +} +POST_SYSCALL(____sigtimedwait50) +(long long res, void *set_, void *info_, void *timeout_) {} +PRE_SYSCALL(__mq_timedsend50) +(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_, + void *abs_timeout_) { + if (msg_ptr_) { + PRE_READ(msg_ptr_, msg_len_); + } + 
if (abs_timeout_) { + PRE_READ(abs_timeout_, struct_timespec_sz); + } +} +POST_SYSCALL(__mq_timedsend50) +(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_, + long long msg_prio_, void *abs_timeout_) {} +PRE_SYSCALL(__mq_timedreceive50) +(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_, + void *abs_timeout_) { + if (msg_ptr_) { + PRE_READ(msg_ptr_, msg_len_); + } + if (abs_timeout_) { + PRE_READ(abs_timeout_, struct_timespec_sz); + } +} +POST_SYSCALL(__mq_timedreceive50) +(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_, + void *msg_prio_, void *abs_timeout_) {} +PRE_SYSCALL(compat_60__lwp_park) +(void *ts_, long long unpark_, void *hint_, void *unparkhint_) { + /* TODO */ +} +POST_SYSCALL(compat_60__lwp_park) +(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) { + /* TODO */ +} +PRE_SYSCALL(__kevent50) +(long long fd_, void *changelist_, long long nchanges_, void *eventlist_, + long long nevents_, void *timeout_) { + if (changelist_) { + PRE_READ(changelist_, nchanges_ * struct_kevent_sz); + } + if (timeout_) { + PRE_READ(timeout_, struct_timespec_sz); + } +} +POST_SYSCALL(__kevent50) +(long long res, long long fd_, void *changelist_, long long nchanges_, + void *eventlist_, long long nevents_, void *timeout_) {} +PRE_SYSCALL(__pselect50) +(long long nd_, void *in_, void *ou_, void *ex_, void *ts_, void *mask_) { + if (ts_) { + PRE_READ(ts_, struct_timespec_sz); + } + if (mask_) { + PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t)); + } +} +POST_SYSCALL(__pselect50) +(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_, + void *mask_) {} +PRE_SYSCALL(__pollts50)(void *fds_, long long nfds_, void *ts_, void *mask_) { + if (ts_) { + PRE_READ(ts_, struct_timespec_sz); + } + if (mask_) { + PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t)); + } +} +POST_SYSCALL(__pollts50) +(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) {} 
+PRE_SYSCALL(__aio_suspend50)(void *list_, long long nent_, void *timeout_) { + int i; + const struct aiocb *const *list = (const struct aiocb *const *)list_; + if (list) { + for (i = 0; i < nent_; i++) { + if (list[i]) { + PRE_READ(list[i], sizeof(struct __sanitizer_aiocb)); + } + } + } + if (timeout_) { + PRE_READ(timeout_, struct_timespec_sz); + } +} +POST_SYSCALL(__aio_suspend50) +(long long res, void *list_, long long nent_, void *timeout_) {} +PRE_SYSCALL(__stat50)(void *path_, void *ub_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(__stat50)(long long res, void *path_, void *ub_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(__fstat50)(long long fd_, void *sb_) { /* Nothing to do */ } +POST_SYSCALL(__fstat50)(long long res, long long fd_, void *sb_) { + /* Nothing to do */ +} +PRE_SYSCALL(__lstat50)(void *path_, void *ub_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(__lstat50)(long long res, void *path_, void *ub_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(____semctl50) +(long long semid_, long long semnum_, long long cmd_, void *arg_) { + /* Nothing to do */ +} +POST_SYSCALL(____semctl50) +(long long res, long long semid_, long long semnum_, long long cmd_, + void *arg_) { + /* Nothing to do */ +} +PRE_SYSCALL(__shmctl50)(long long shmid_, long long cmd_, void *buf_) { + /* Nothing to do */ +} +POST_SYSCALL(__shmctl50) +(long long res, long long shmid_, long long cmd_, void *buf_) { + /* Nothing to do */ +} +PRE_SYSCALL(__msgctl50)(long long msqid_, long long cmd_, void *buf_) { + /* Nothing to do */ +} +POST_SYSCALL(__msgctl50) +(long long res, long 
long msqid_, long long cmd_, void *buf_) { + /* Nothing to do */ +} +PRE_SYSCALL(__getrusage50)(long long who_, void *rusage_) { + /* Nothing to do */ +} +POST_SYSCALL(__getrusage50)(long long res, long long who_, void *rusage_) { + /* Nothing to do */ +} +PRE_SYSCALL(__timer_settime50) +(long long timerid_, long long flags_, void *value_, void *ovalue_) { + struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_; + if (value) { + PRE_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t)); + PRE_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t)); + PRE_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t)); + PRE_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t)); + } +} +POST_SYSCALL(__timer_settime50) +(long long res, long long timerid_, long long flags_, void *value_, + void *ovalue_) { + struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_; + if (res == 0) { + if (value) { + POST_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t)); + POST_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t)); + POST_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t)); + POST_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t)); + } + } +} +PRE_SYSCALL(__timer_gettime50)(long long timerid_, void *value_) { + /* Nothing to do */ +} +POST_SYSCALL(__timer_gettime50) +(long long res, long long timerid_, void *value_) { + /* Nothing to do */ +} +#if defined(NTP) || !defined(_KERNEL_OPT) +PRE_SYSCALL(__ntp_gettime50)(void *ntvp_) { /* Nothing to do */ } +POST_SYSCALL(__ntp_gettime50)(long long res, void *ntvp_) { + /* Nothing to do */ +} +#else +/* syscall 448 has been skipped */ +#endif +PRE_SYSCALL(__wait450) +(long long pid_, void *status_, long long options_, void *rusage_) { + /* Nothing to do */ +} +POST_SYSCALL(__wait450) +(long long res, long long pid_, void *status_, long long options_, + void *rusage_) { + /* Nothing to do */ +} 
+PRE_SYSCALL(__mknod50)(void *path_, long long mode_, long long dev_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(__mknod50) +(long long res, void *path_, long long mode_, long long dev_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(__fhstat50)(void *fhp_, long long fh_size_, void *sb_) { + if (fhp_) { + PRE_READ(fhp_, fh_size_); + } +} +POST_SYSCALL(__fhstat50) +(long long res, void *fhp_, long long fh_size_, void *sb_) { + if (res == 0) { + if (fhp_) { + POST_READ(fhp_, fh_size_); + } + } +} +/* syscall 452 has been skipped */ +PRE_SYSCALL(pipe2)(void *fildes_, long long flags_) { /* Nothing to do */ } +POST_SYSCALL(pipe2)(long long res, void *fildes_, long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(dup3)(long long from_, long long to_, long long flags_) { + /* Nothing to do */ +} +POST_SYSCALL(dup3) +(long long res, long long from_, long long to_, long long flags_) { + /* Nothing to do */ +} +PRE_SYSCALL(kqueue1)(long long flags_) { /* Nothing to do */ } +POST_SYSCALL(kqueue1)(long long res, long long flags_) { /* Nothing to do */ } +PRE_SYSCALL(paccept) +(long long s_, void *name_, void *anamelen_, void *mask_, long long flags_) { + if (mask_) { + PRE_READ(mask_, sizeof(__sanitizer_sigset_t)); + } +} +POST_SYSCALL(paccept) +(long long res, long long s_, void *name_, void *anamelen_, void *mask_, + long long flags_) { + if (res >= 0) { + if (mask_) { + PRE_READ(mask_, sizeof(__sanitizer_sigset_t)); + } + } +} +PRE_SYSCALL(linkat) +(long long fd1_, void *name1_, long long fd2_, void *name2_, long long flags_) { + const char *name1 = (const char *)name1_; + const char *name2 = (const char *)name2_; + if (name1) { + PRE_READ(name1, __sanitizer::internal_strlen(name1) + 1); + } + if (name2) { + PRE_READ(name2, __sanitizer::internal_strlen(name2) + 1); + 
} +} +POST_SYSCALL(linkat) +(long long res, long long fd1_, void *name1_, long long fd2_, void *name2_, + long long flags_) { + const char *name1 = (const char *)name1_; + const char *name2 = (const char *)name2_; + if (res == 0) { + if (name1) { + POST_READ(name1, __sanitizer::internal_strlen(name1) + 1); + } + if (name2) { + POST_READ(name2, __sanitizer::internal_strlen(name2) + 1); + } + } +} +PRE_SYSCALL(renameat) +(long long fromfd_, void *from_, long long tofd_, void *to_) { + const char *from = (const char *)from_; + const char *to = (const char *)to_; + if (from) { + PRE_READ(from, __sanitizer::internal_strlen(from) + 1); + } + if (to) { + PRE_READ(to, __sanitizer::internal_strlen(to) + 1); + } +} +POST_SYSCALL(renameat) +(long long res, long long fromfd_, void *from_, long long tofd_, void *to_) { + const char *from = (const char *)from_; + const char *to = (const char *)to_; + if (res == 0) { + if (from) { + POST_READ(from, __sanitizer::internal_strlen(from) + 1); + } + if (to) { + POST_READ(to, __sanitizer::internal_strlen(to) + 1); + } + } +} +PRE_SYSCALL(mkfifoat)(long long fd_, void *path_, long long mode_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(mkfifoat) +(long long res, long long fd_, void *path_, long long mode_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(mknodat) +(long long fd_, void *path_, long long mode_, long long PAD_, long long dev_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(mknodat) +(long long res, long long fd_, void *path_, long long mode_, long long PAD_, + long long dev_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} 
+PRE_SYSCALL(mkdirat)(long long fd_, void *path_, long long mode_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(mkdirat) +(long long res, long long fd_, void *path_, long long mode_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(faccessat) +(long long fd_, void *path_, long long amode_, long long flag_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(faccessat) +(long long res, long long fd_, void *path_, long long amode_, long long flag_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(fchmodat) +(long long fd_, void *path_, long long mode_, long long flag_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(fchmodat) +(long long res, long long fd_, void *path_, long long mode_, long long flag_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(fchownat) +(long long fd_, void *path_, long long owner_, long long group_, + long long flag_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(fchownat) +(long long res, long long fd_, void *path_, long long owner_, long long group_, + long long flag_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(fexecve)(long long fd_, void *argp_, void *envp_) { /* TODO */ } +POST_SYSCALL(fexecve)(long long res, long long fd_, void *argp_, void 
*envp_) { + /* TODO */ +} +PRE_SYSCALL(fstatat)(long long fd_, void *path_, void *buf_, long long flag_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(fstatat) +(long long res, long long fd_, void *path_, void *buf_, long long flag_) { + const char *path = (const char *)path_; + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +PRE_SYSCALL(utimensat) +(long long fd_, void *path_, void *tptr_, long long flag_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (tptr_) { + PRE_READ(tptr_, struct_timespec_sz); + } +} +POST_SYSCALL(utimensat) +(long long res, long long fd_, void *path_, void *tptr_, long long flag_) { + const char *path = (const char *)path_; + if (res > 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + if (tptr_) { + POST_READ(tptr_, struct_timespec_sz); + } + } +} +PRE_SYSCALL(openat) +(long long fd_, void *path_, long long oflags_, long long mode_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(openat) +(long long res, long long fd_, void *path_, long long oflags_, + long long mode_) { + const char *path = (const char *)path_; + if (res > 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(readlinkat) +(long long fd_, void *path_, void *buf_, long long bufsize_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(readlinkat) +(long long res, long long fd_, void *path_, void *buf_, long long bufsize_) { + const char *path = (const char *)path_; + if (res > 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(symlinkat)(void *path1_, long long fd_, void 
*path2_) { + const char *path1 = (const char *)path1_; + const char *path2 = (const char *)path2_; + if (path1) { + PRE_READ(path1, __sanitizer::internal_strlen(path1) + 1); + } + if (path2) { + PRE_READ(path2, __sanitizer::internal_strlen(path2) + 1); + } +} +POST_SYSCALL(symlinkat) +(long long res, void *path1_, long long fd_, void *path2_) { + const char *path1 = (const char *)path1_; + const char *path2 = (const char *)path2_; + if (res == 0) { + if (path1) { + POST_READ(path1, __sanitizer::internal_strlen(path1) + 1); + } + if (path2) { + POST_READ(path2, __sanitizer::internal_strlen(path2) + 1); + } + } +} +PRE_SYSCALL(unlinkat)(long long fd_, void *path_, long long flag_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(unlinkat) +(long long res, long long fd_, void *path_, long long flag_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(futimens)(long long fd_, void *tptr_) { + struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_; + if (tptr) { + PRE_READ(tptr[0], struct_timespec_sz); + PRE_READ(tptr[1], struct_timespec_sz); + } +} +POST_SYSCALL(futimens)(long long res, long long fd_, void *tptr_) { + struct __sanitizer_timespec **tptr = (struct __sanitizer_timespec **)tptr_; + if (res == 0) { + if (tptr) { + POST_READ(tptr[0], struct_timespec_sz); + POST_READ(tptr[1], struct_timespec_sz); + } + } +} +PRE_SYSCALL(__quotactl)(void *path_, void *args_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(__quotactl)(long long res, void *path_, void *args_) { + const char *path = (const char *)path_; + if (res == 0) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(posix_spawn) +(void *pid_, void *path_, void 
*file_actions_, void *attrp_, void *argv_, + void *envp_) { + const char *path = (const char *)path_; + if (path) { + PRE_READ(path, __sanitizer::internal_strlen(path) + 1); + } +} +POST_SYSCALL(posix_spawn) +(long long res, void *pid_, void *path_, void *file_actions_, void *attrp_, + void *argv_, void *envp_) { + const char *path = (const char *)path_; + if (pid_) { + if (path) { + POST_READ(path, __sanitizer::internal_strlen(path) + 1); + } + } +} +PRE_SYSCALL(recvmmsg) +(long long s_, void *mmsg_, long long vlen_, long long flags_, void *timeout_) { + if (timeout_) { + PRE_READ(timeout_, struct_timespec_sz); + } +} +POST_SYSCALL(recvmmsg) +(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_, + void *timeout_) { + if (res >= 0) { + if (timeout_) { + POST_READ(timeout_, struct_timespec_sz); + } + } +} +PRE_SYSCALL(sendmmsg) +(long long s_, void *mmsg_, long long vlen_, long long flags_) { + struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_; + unsigned int vlen = (vlen_ > 1024 ? 1024 : vlen_); + if (mmsg) { + PRE_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) * vlen); + } +} +POST_SYSCALL(sendmmsg) +(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_) { + struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_; + unsigned int vlen = (vlen_ > 1024 ? 
1024 : vlen_); + if (res >= 0) { + if (mmsg) { + POST_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) * vlen); + } + } +} +PRE_SYSCALL(clock_nanosleep) +(long long clock_id_, long long flags_, void *rqtp_, void *rmtp_) { + if (rqtp_) { + PRE_READ(rqtp_, struct_timespec_sz); + } +} +POST_SYSCALL(clock_nanosleep) +(long long res, long long clock_id_, long long flags_, void *rqtp_, + void *rmtp_) { + if (rqtp_) { + POST_READ(rqtp_, struct_timespec_sz); + } +} +PRE_SYSCALL(___lwp_park60) +(long long clock_id_, long long flags_, void *ts_, long long unpark_, + void *hint_, void *unparkhint_) { + if (ts_) { + PRE_READ(ts_, struct_timespec_sz); + } +} +POST_SYSCALL(___lwp_park60) +(long long res, long long clock_id_, long long flags_, void *ts_, + long long unpark_, void *hint_, void *unparkhint_) { + if (res == 0) { + if (ts_) { + POST_READ(ts_, struct_timespec_sz); + } + } +} +PRE_SYSCALL(posix_fallocate) +(long long fd_, long long PAD_, long long pos_, long long len_) { + /* Nothing to do */ +} +POST_SYSCALL(posix_fallocate) +(long long res, long long fd_, long long PAD_, long long pos_, long long len_) { + /* Nothing to do */ +} +PRE_SYSCALL(fdiscard) +(long long fd_, long long PAD_, long long pos_, long long len_) { + /* Nothing to do */ +} +POST_SYSCALL(fdiscard) +(long long res, long long fd_, long long PAD_, long long pos_, long long len_) { + /* Nothing to do */ +} +PRE_SYSCALL(wait6) +(long long idtype_, long long id_, void *status_, long long options_, + void *wru_, void *info_) { + /* Nothing to do */ +} +POST_SYSCALL(wait6) +(long long res, long long idtype_, long long id_, void *status_, + long long options_, void *wru_, void *info_) { + /* Nothing to do */ +} +PRE_SYSCALL(clock_getcpuclockid2) +(long long idtype_, long long id_, void *clock_id_) { + /* Nothing to do */ +} +POST_SYSCALL(clock_getcpuclockid2) +(long long res, long long idtype_, long long id_, void *clock_id_) { + /* Nothing to do */ +} +#undef SYS_MAXSYSARGS +} // extern "C" + +#undef 
PRE_SYSCALL +#undef PRE_READ +#undef PRE_WRITE +#undef POST_SYSCALL +#undef POST_READ +#undef POST_WRITE + +#endif // SANITIZER_NETBSD diff --git a/lib/sanitizer_common/sanitizer_tls_get_addr.cc b/lib/sanitizer_common/sanitizer_tls_get_addr.cc index 6fa0e9298b31..85dc806df284 100644 --- a/lib/sanitizer_common/sanitizer_tls_get_addr.cc +++ b/lib/sanitizer_common/sanitizer_tls_get_addr.cc @@ -142,7 +142,8 @@ bool DTLSInDestruction(DTLS *dtls) { #else void DTLS_on_libc_memalign(void *ptr, uptr size) {} -DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res) { return 0; } +DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, + unsigned long, unsigned long) { return 0; } DTLS *DTLS_Get() { return 0; } void DTLS_Destroy() {} bool DTLSInDestruction(DTLS *dtls) { diff --git a/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc index 24c94700173d..9e12c417c71d 100644 --- a/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc +++ b/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc @@ -149,7 +149,7 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, void *map = acquire_my_map_info_list(); CHECK(map); - InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax); + InternalMmapVector<backtrace_frame_t> frames(kStackTraceMax); // siginfo argument appears to be unused. sptr res = unwind_backtrace_signal_arch(/* siginfo */ 0, context, map, frames.data(), diff --git a/lib/sanitizer_common/sanitizer_unwind_win.cc b/lib/sanitizer_common/sanitizer_unwind_win.cc new file mode 100644 index 000000000000..62bac4e9b4a5 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_unwind_win.cc @@ -0,0 +1,75 @@ +//===-- sanitizer_unwind_win.cc -------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +/// Sanitizer unwind Windows specific functions. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_WINDOWS + +#define WIN32_LEAN_AND_MEAN +#define NOGDI +#include <windows.h> + +#include "sanitizer_dbghelp.h" // for StackWalk64 +#include "sanitizer_stacktrace.h" +#include "sanitizer_symbolizer.h" // for InitializeDbgHelpIfNeeded + +using namespace __sanitizer; + +#if !SANITIZER_GO +void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) { + CHECK_GE(max_depth, 2); + // FIXME: CaptureStackBackTrace might be too slow for us. + // FIXME: Compare with StackWalk64. + // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc + size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax), + (void **)&trace_buffer[0], 0); + if (size == 0) + return; + + // Skip the RTL frames by searching for the PC in the stacktrace. 
+ uptr pc_location = LocatePcInTrace(pc); + PopStackFrames(pc_location); +} + +void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, + u32 max_depth) { + CONTEXT ctx = *(CONTEXT *)context; + STACKFRAME64 stack_frame; + memset(&stack_frame, 0, sizeof(stack_frame)); + + InitializeDbgHelpIfNeeded(); + + size = 0; +#if defined(_WIN64) + int machine_type = IMAGE_FILE_MACHINE_AMD64; + stack_frame.AddrPC.Offset = ctx.Rip; + stack_frame.AddrFrame.Offset = ctx.Rbp; + stack_frame.AddrStack.Offset = ctx.Rsp; +#else + int machine_type = IMAGE_FILE_MACHINE_I386; + stack_frame.AddrPC.Offset = ctx.Eip; + stack_frame.AddrFrame.Offset = ctx.Ebp; + stack_frame.AddrStack.Offset = ctx.Esp; +#endif + stack_frame.AddrPC.Mode = AddrModeFlat; + stack_frame.AddrFrame.Mode = AddrModeFlat; + stack_frame.AddrStack.Mode = AddrModeFlat; + while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(), + &stack_frame, &ctx, NULL, SymFunctionTableAccess64, + SymGetModuleBase64, NULL) && + size < Min(max_depth, kStackTraceMax)) { + trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset; + } +} +#endif // #if !SANITIZER_GO + +#endif // SANITIZER_WINDOWS diff --git a/lib/sanitizer_common/sanitizer_vector.h b/lib/sanitizer_common/sanitizer_vector.h index 25cfeed35f22..0632ccce262e 100644 --- a/lib/sanitizer_common/sanitizer_vector.h +++ b/lib/sanitizer_common/sanitizer_vector.h @@ -82,6 +82,10 @@ class Vector { return; } uptr old_size = Size(); + if (size <= old_size) { + end_ = begin_ + size; + return; + } EnsureSize(size); if (old_size < size) { for (uptr i = old_size; i < size; i++) diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc index 34bf1812d605..b8060b2e640c 100644 --- a/lib/sanitizer_common/sanitizer_win.cc +++ b/lib/sanitizer_common/sanitizer_win.cc @@ -23,13 +23,10 @@ #include <stdlib.h> #include "sanitizer_common.h" -#include "sanitizer_dbghelp.h" #include "sanitizer_file.h" #include "sanitizer_libc.h" #include 
"sanitizer_mutex.h" #include "sanitizer_placement_new.h" -#include "sanitizer_stacktrace.h" -#include "sanitizer_symbolizer.h" #include "sanitizer_win_defs.h" // A macro to tell the compiler that this part of the code cannot be reached, @@ -205,7 +202,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, return (void *)mapped_addr; } -void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { +bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { // FIXME: is this really "NoReserve"? On Win32 this does not matter much, // but on Win64 it does. (void)name; // unsupported @@ -218,11 +215,13 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); #endif - if (p == 0) + if (p == 0) { Report("ERROR: %s failed to " "allocate %p (%zd) bytes at %p (error code: %d)\n", SanitizerToolName, size, size, fixed_addr, GetLastError()); - return p; + return false; + } + return true; } // Memory space mapped by 'MmapFixedOrDie' must have been reserved by @@ -250,15 +249,12 @@ uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size) { } void ReservedAddressRange::Unmap(uptr addr, uptr size) { - void* addr_as_void = reinterpret_cast<void*>(addr); - uptr base_as_uptr = reinterpret_cast<uptr>(base_); // Only unmap if it covers the entire range. - CHECK((addr == base_as_uptr) && (size == size_)); - UnmapOrDie(addr_as_void, size); - if (addr_as_void == base_) { - base_ = reinterpret_cast<void*>(addr + size); - } - size_ = size_ - size; + CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_)); + // We unmap the whole range, just null out the base. 
+ base_ = nullptr; + size_ = 0; + UnmapOrDie(reinterpret_cast<void*>(addr), size); } void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) { @@ -279,11 +275,7 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) { } uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) { - if (fixed_addr) { - base_ = MmapFixedNoAccess(fixed_addr, size, name); - } else { - base_ = MmapNoAccess(size); - } + base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size); size_ = size; name_ = name; (void)os_handle_; // unsupported @@ -321,17 +313,20 @@ void ReleaseMemoryPagesToOS(uptr beg, uptr end) { // FIXME: add madvise-analog when we move to 64-bits. } -void NoHugePagesInRegion(uptr addr, uptr size) { +bool NoHugePagesInRegion(uptr addr, uptr size) { // FIXME: probably similar to ReleaseMemoryToOS. + return true; } -void DontDumpShadowMemory(uptr addr, uptr length) { +bool DontDumpShadowMemory(uptr addr, uptr length) { // This is almost useless on 32-bits. // FIXME: add madvise-analog when we move to 64-bits. 
+ return true; } uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, - uptr *largest_gap_found) { + uptr *largest_gap_found, + uptr *max_occupied_addr) { uptr address = 0; while (true) { MEMORY_BASIC_INFORMATION info; @@ -433,7 +428,7 @@ void DumpProcessMap() { modules.init(); uptr num_modules = modules.size(); - InternalScopedBuffer<ModuleInfo> module_infos(num_modules); + InternalMmapVector<ModuleInfo> module_infos(num_modules); for (size_t i = 0; i < num_modules; ++i) { module_infos[i].filepath = modules[i].full_name(); module_infos[i].base_address = modules[i].ranges().front()->beg; @@ -466,8 +461,7 @@ void ReExec() { UNIMPLEMENTED(); } -void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) { -} +void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {} bool StackSizeIsUnlimited() { UNIMPLEMENTED(); @@ -502,12 +496,19 @@ void SleepForMillis(int millis) { } u64 NanoTime() { - return 0; + static LARGE_INTEGER frequency = {}; + LARGE_INTEGER counter; + if (UNLIKELY(frequency.QuadPart == 0)) { + QueryPerformanceFrequency(&frequency); + CHECK_NE(frequency.QuadPart, 0); + } + QueryPerformanceCounter(&counter); + counter.QuadPart *= 1000ULL * 1000000ULL; + counter.QuadPart /= frequency.QuadPart; + return counter.QuadPart; } -u64 MonotonicNanoTime() { - return 0; -} +u64 MonotonicNanoTime() { return NanoTime(); } void Abort() { internal__exit(3); @@ -756,7 +757,10 @@ uptr internal_ftruncate(fd_t fd, uptr size) { } uptr GetRSS() { - return 0; + PROCESS_MEMORY_COUNTERS counters; + if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters))) + return 0; + return counters.WorkingSetSize; } void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; } @@ -830,54 +834,6 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, #endif } -#if !SANITIZER_GO -void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) { - CHECK_GE(max_depth, 2); - // FIXME: CaptureStackBackTrace 
might be too slow for us. - // FIXME: Compare with StackWalk64. - // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc - size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax), - (void **)&trace_buffer[0], 0); - if (size == 0) - return; - - // Skip the RTL frames by searching for the PC in the stacktrace. - uptr pc_location = LocatePcInTrace(pc); - PopStackFrames(pc_location); -} - -void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, - u32 max_depth) { - CONTEXT ctx = *(CONTEXT *)context; - STACKFRAME64 stack_frame; - memset(&stack_frame, 0, sizeof(stack_frame)); - - InitializeDbgHelpIfNeeded(); - - size = 0; -#if defined(_WIN64) - int machine_type = IMAGE_FILE_MACHINE_AMD64; - stack_frame.AddrPC.Offset = ctx.Rip; - stack_frame.AddrFrame.Offset = ctx.Rbp; - stack_frame.AddrStack.Offset = ctx.Rsp; -#else - int machine_type = IMAGE_FILE_MACHINE_I386; - stack_frame.AddrPC.Offset = ctx.Eip; - stack_frame.AddrFrame.Offset = ctx.Ebp; - stack_frame.AddrStack.Offset = ctx.Esp; -#endif - stack_frame.AddrPC.Mode = AddrModeFlat; - stack_frame.AddrFrame.Mode = AddrModeFlat; - stack_frame.AddrStack.Mode = AddrModeFlat; - while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(), - &stack_frame, &ctx, NULL, SymFunctionTableAccess64, - SymGetModuleBase64, NULL) && - size < Min(max_depth, kStackTraceMax)) { - trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset; - } -} -#endif // #if !SANITIZER_GO - void ReportFile::Write(const char *buffer, uptr length) { SpinMutexLock l(mu); ReopenIfNecessary(); @@ -1073,6 +1029,10 @@ void MaybeReexec() { // No need to re-exec on Windows. } +void CheckASLR() { + // Do nothing +} + char **GetArgv() { // FIXME: Actually implement this function. return 0; @@ -1106,9 +1066,10 @@ bool GetRandom(void *buffer, uptr length, bool blocking) { UNIMPLEMENTED(); } -// FIXME: implement on this platform. 
u32 GetNumberOfCPUs() { - UNIMPLEMENTED(); + SYSTEM_INFO sysinfo = {}; + GetNativeSystemInfo(&sysinfo); + return sysinfo.dwNumberOfProcessors; } } // namespace __sanitizer diff --git a/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh index 0559a2e7eb05..0c3917c6b17b 100755 --- a/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh +++ b/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh @@ -2,7 +2,7 @@ # # Run as: CLANG=bin/clang ZLIB_SRC=src/zlib \ # build_symbolizer.sh runtime_build/lib/clang/4.0.0/lib/linux/ -# zlib can be downloaded from from http://www.zlib.net. +# zlib can be downloaded from http://www.zlib.net. # # Script compiles self-contained object file with symbolization code and injects # it into the given set of runtime libraries. Script updates only libraries @@ -98,12 +98,10 @@ if [[ ! -d ${LIBCXX_BUILD} ]]; then -DLIBCXXABI_ENABLE_ASSERTIONS=OFF \ -DLIBCXXABI_ENABLE_EXCEPTIONS=OFF \ -DLIBCXXABI_ENABLE_SHARED=OFF \ - -DLIBCXXABI_ENABLE_THREADS=OFF \ -DLIBCXX_ENABLE_ASSERTIONS=OFF \ -DLIBCXX_ENABLE_EXCEPTIONS=OFF \ -DLIBCXX_ENABLE_RTTI=OFF \ -DLIBCXX_ENABLE_SHARED=OFF \ - -DLIBCXX_ENABLE_THREADS=OFF \ $LLVM_SRC fi cd ${LIBCXX_BUILD} @@ -129,7 +127,7 @@ if [[ ! 
-d ${LLVM_BUILD} ]]; then $LLVM_SRC fi cd ${LLVM_BUILD} -ninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMMC +ninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMMC LLVMDemangle cd ${BUILD_DIR} rm -rf ${SYMBOLIZER_BUILD} @@ -152,6 +150,7 @@ $SCRIPT_DIR/ar_to_bc.sh $LIBCXX_BUILD/lib/libc++.a \ $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \ $LLVM_BUILD/lib/libLLVMSupport.a \ $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \ + $LLVM_BUILD/lib/libLLVMDemangle.a \ $LLVM_BUILD/lib/libLLVMMC.a \ $ZLIB_BUILD/libz.a \ symbolizer.a \ diff --git a/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt index a23c9534701d..f77648d7818a 100644 --- a/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt +++ b/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt @@ -15,6 +15,14 @@ __divdi3 U __dso_handle U __errno_location U __interceptor_pread w +__interceptor_pthread_cond_broadcast w +__interceptor_pthread_cond_wait w +__interceptor_pthread_getspecific w +__interceptor_pthread_key_create w +__interceptor_pthread_mutex_lock w +__interceptor_pthread_mutex_unlock w +__interceptor_pthread_once w +__interceptor_pthread_setspecific w __interceptor_read w __interceptor_realpath w __moddi3 U @@ -61,6 +69,7 @@ gettimeofday U ioctl U isalpha U isatty U +islower U isprint U isupper U isxdigit U diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt index 1bccaa78f39b..401682baa07b 100644 --- a/lib/sanitizer_common/tests/CMakeLists.txt +++ b/lib/sanitizer_common/tests/CMakeLists.txt @@ -41,7 +41,7 @@ set(SANITIZER_TEST_HEADERS sanitizer_pthread_wrappers.h sanitizer_test_config.h sanitizer_test_utils.h) -foreach(header ${SANITIZER_HEADERS}) +foreach(header ${SANITIZER_IMPL_HEADERS}) list(APPEND SANITIZER_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header}) endforeach() @@ -54,7 +54,8 @@ 
set(SANITIZER_TEST_CFLAGS_COMMON -fno-rtti -O2 -Werror=sign-compare - -Wno-non-virtual-dtor) + -Wno-non-virtual-dtor + -Wno-gnu-zero-variadic-macro-arguments) if(MSVC) # Disable exceptions on Windows until they work reliably. @@ -178,7 +179,8 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID) if(APPLE) add_sanitizer_common_lib("RTSanitizerCommon.test.osx" $<TARGET_OBJECTS:RTSanitizerCommon.osx> - $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx>) + $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.osx>) else() if(CAN_TARGET_x86_64) add_sanitizer_common_lib("RTSanitizerCommon.test.nolibc.x86_64" @@ -188,7 +190,8 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID) foreach(arch ${SANITIZER_UNITTEST_SUPPORTED_ARCH}) add_sanitizer_common_lib("RTSanitizerCommon.test.${arch}" $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> - $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>) + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>) endforeach() endif() foreach(arch ${SANITIZER_UNITTEST_SUPPORTED_ARCH}) @@ -202,7 +205,8 @@ if(ANDROID) ${SANITIZER_UNITTESTS} ${COMPILER_RT_GTEST_SOURCE} $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> - $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>) + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}>) set_target_compile_flags(SanitizerTest ${SANITIZER_COMMON_CFLAGS} ${SANITIZER_TEST_CFLAGS_COMMON}) diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc index 7b5e3e21f1ee..ef4c10b8de5d 100644 --- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc @@ -444,7 +444,7 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) { TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) { TestMapUnmapCallback::map_count = 0; TestMapUnmapCallback::unmap_count = 0; - 
LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a; + LargeMmapAllocator<TestMapUnmapCallback> a; a.Init(); AllocatorStats stats; stats.Init(); @@ -482,7 +482,7 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) { #endif TEST(SanitizerCommon, LargeMmapAllocator) { - LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a; + LargeMmapAllocator<NoOpMapUnmapCallback> a; a.Init(); AllocatorStats stats; stats.Init(); @@ -565,7 +565,6 @@ void TestCombinedAllocator() { typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> Allocator; - SetAllocatorMayReturnNull(true); Allocator *a = new Allocator; a->Init(kReleaseToOSIntervalNever); std::mt19937 r; @@ -579,11 +578,7 @@ void TestCombinedAllocator() { EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0); EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0); EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0); - - // Set to false - SetAllocatorMayReturnNull(false); - EXPECT_DEATH(a->Allocate(&cache, -1, 1), - "allocator is terminating the process"); + EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0); const uptr kNumAllocs = 100000; const uptr kNumIter = 10; @@ -818,11 +813,11 @@ TEST(Allocator, LargeAlloc) { TEST(Allocator, ScopedBuffer) { const int kSize = 512; { - InternalScopedBuffer<int> int_buf(kSize); - EXPECT_EQ(sizeof(int) * kSize, int_buf.size()); // NOLINT + InternalMmapVector<int> int_buf(kSize); + EXPECT_EQ((uptr)kSize, int_buf.size()); // NOLINT } - InternalScopedBuffer<char> char_buf(kSize); - EXPECT_EQ(sizeof(char) * kSize, char_buf.size()); // NOLINT + InternalMmapVector<char> char_buf(kSize); + EXPECT_EQ((uptr)kSize, char_buf.size()); // NOLINT internal_memset(char_buf.data(), 'c', kSize); for (int i = 0; i < kSize; i++) { EXPECT_EQ('c', char_buf[i]); @@ -893,7 +888,7 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) { } TEST(SanitizerCommon, LargeMmapAllocatorIteration) { - LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a; + 
LargeMmapAllocator<NoOpMapUnmapCallback> a; a.Init(); AllocatorStats stats; stats.Init(); @@ -920,7 +915,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) { } TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) { - LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a; + LargeMmapAllocator<NoOpMapUnmapCallback> a; a.Init(); AllocatorStats stats; stats.Init(); @@ -1294,7 +1289,7 @@ TEST(SanitizerCommon, TwoLevelByteMap) { const u64 kSize1 = 1 << 6, kSize2 = 1 << 12; const u64 n = kSize1 * kSize2; TwoLevelByteMap<kSize1, kSize2> m; - m.TestOnlyInit(); + m.Init(); for (u64 i = 0; i < n; i += 7) { m.set(i, (i % 100) + 1); } @@ -1329,7 +1324,7 @@ void *TwoLevelByteMapUserThread(void *param) { TEST(SanitizerCommon, ThreadedTwoLevelByteMap) { TestByteMap m; - m.TestOnlyInit(); + m.Init(); TestMapUnmapCallback::map_count = 0; TestMapUnmapCallback::unmap_count = 0; static const int kNumThreads = 4; diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc index 576649cea359..0177484a55a8 100644 --- a/lib/sanitizer_common/tests/sanitizer_common_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc @@ -39,37 +39,37 @@ TEST(SanitizerCommon, SortTest) { for (uptr i = 0; i < n; i++) { array[i] = i; } - SortArray(array, n); + Sort(array, n); EXPECT_TRUE(IsSorted(array, n)); // Reverse order. for (uptr i = 0; i < n; i++) { array[i] = n - 1 - i; } - SortArray(array, n); + Sort(array, n); EXPECT_TRUE(IsSorted(array, n)); // Mixed order. for (uptr i = 0; i < n; i++) { array[i] = (i % 2 == 0) ? i : n - 1 - i; } - SortArray(array, n); + Sort(array, n); EXPECT_TRUE(IsSorted(array, n)); // All equal. for (uptr i = 0; i < n; i++) { array[i] = 42; } - SortArray(array, n); + Sort(array, n); EXPECT_TRUE(IsSorted(array, n)); // All but one sorted. 
for (uptr i = 0; i < n - 1; i++) { array[i] = i; } array[n - 1] = 42; - SortArray(array, n); + Sort(array, n); EXPECT_TRUE(IsSorted(array, n)); // Minimal case - sort three elements. array[0] = 1; array[1] = 0; - SortArray(array, 2); + Sort(array, 2); EXPECT_TRUE(IsSorted(array, 2)); } @@ -88,25 +88,37 @@ TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) { } } -#if SANITIZER_LINUX -TEST(SanitizerCommon, SanitizerSetThreadName) { - const char *names[] = { - "0123456789012", - "01234567890123", - "012345678901234", // Larger names will be truncated on linux. - }; +TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) { + InternalMmapVector<uptr> v; + v.reserve(1); + CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr)); +} - for (size_t i = 0; i < ARRAY_SIZE(names); i++) { - EXPECT_TRUE(SanitizerSetThreadName(names[i])); - char buff[100]; - EXPECT_TRUE(SanitizerGetThreadName(buff, sizeof(buff) - 1)); - EXPECT_EQ(0, internal_strcmp(buff, names[i])); - } +TEST(SanitizerCommon, InternalMmapVectorReize) { + InternalMmapVector<uptr> v; + CHECK_EQ(0U, v.size()); + CHECK_GE(v.capacity(), v.size()); + + v.reserve(1000); + CHECK_EQ(0U, v.size()); + CHECK_GE(v.capacity(), 1000U); + + v.resize(10000); + CHECK_EQ(10000U, v.size()); + CHECK_GE(v.capacity(), v.size()); + uptr cap = v.capacity(); + + v.resize(100); + CHECK_EQ(100U, v.size()); + CHECK_EQ(v.capacity(), cap); + + v.reserve(10); + CHECK_EQ(100U, v.size()); + CHECK_EQ(v.capacity(), cap); } -#endif TEST(SanitizerCommon, InternalMmapVector) { - InternalMmapVector<uptr> vector(1); + InternalMmapVector<uptr> vector; for (uptr i = 0; i < 100; i++) { EXPECT_EQ(i, vector.size()); vector.push_back(i); @@ -119,11 +131,52 @@ TEST(SanitizerCommon, InternalMmapVector) { vector.pop_back(); EXPECT_EQ((uptr)i, vector.size()); } - InternalMmapVector<uptr> empty_vector(0); + InternalMmapVector<uptr> empty_vector; CHECK_GT(empty_vector.capacity(), 0U); CHECK_EQ(0U, empty_vector.size()); } +TEST(SanitizerCommon, 
InternalMmapVectorEq) { + InternalMmapVector<uptr> vector1; + InternalMmapVector<uptr> vector2; + for (uptr i = 0; i < 100; i++) { + vector1.push_back(i); + vector2.push_back(i); + } + EXPECT_TRUE(vector1 == vector2); + EXPECT_FALSE(vector1 != vector2); + + vector1.push_back(1); + EXPECT_FALSE(vector1 == vector2); + EXPECT_TRUE(vector1 != vector2); + + vector2.push_back(1); + EXPECT_TRUE(vector1 == vector2); + EXPECT_FALSE(vector1 != vector2); + + vector1[55] = 1; + EXPECT_FALSE(vector1 == vector2); + EXPECT_TRUE(vector1 != vector2); +} + +TEST(SanitizerCommon, InternalMmapVectorSwap) { + InternalMmapVector<uptr> vector1; + InternalMmapVector<uptr> vector2; + InternalMmapVector<uptr> vector3; + InternalMmapVector<uptr> vector4; + for (uptr i = 0; i < 100; i++) { + vector1.push_back(i); + vector2.push_back(i); + vector3.push_back(-i); + vector4.push_back(-i); + } + EXPECT_NE(vector2, vector3); + EXPECT_NE(vector1, vector4); + vector1.swap(vector3); + EXPECT_EQ(vector2, vector3); + EXPECT_EQ(vector1, vector4); +} + void TestThreadInfo(bool main) { uptr stk_addr = 0; uptr stk_size = 0; diff --git a/lib/sanitizer_common/tests/sanitizer_libc_test.cc b/lib/sanitizer_common/tests/sanitizer_libc_test.cc index a73c65a510c2..2f61601cdff9 100644 --- a/lib/sanitizer_common/tests/sanitizer_libc_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_libc_test.cc @@ -9,6 +9,7 @@ // Tests for sanitizer_libc.h. //===----------------------------------------------------------------------===// #include <algorithm> +#include <fstream> #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_file.h" @@ -82,8 +83,18 @@ static void temp_file_name(char *buf, size_t bufsize, const char *prefix) { // on Android already. 
tmpdir = GetEnv("EXTERNAL_STORAGE"); #endif - u32 uid = GetUid(); - internal_snprintf(buf, bufsize, "%s/%s%d", tmpdir, prefix, uid); + internal_snprintf(buf, bufsize, "%s/%sXXXXXX", tmpdir, prefix); + ASSERT_TRUE(mkstemp(buf)); +#endif +} + +static void Unlink(const char *path) { +#if SANITIZER_WINDOWS + // No sanitizer needs to delete a file on Windows yet. If we ever do, we can + // add a portable wrapper and test it from here. + ::DeleteFileA(&path[0]); +#else + internal_unlink(path); #endif } @@ -97,6 +108,14 @@ TEST(SanitizerCommon, FileOps) { temp_file_name(tmpfile, sizeof(tmpfile), "sanitizer_common.fileops.tmp."); fd_t fd = OpenFile(tmpfile, WrOnly); ASSERT_NE(fd, kInvalidFd); + ASSERT_TRUE(WriteToFile(fd, "A", 1)); + CloseFile(fd); + + fd = OpenFile(tmpfile, WrOnly); + ASSERT_NE(fd, kInvalidFd); +#if SANITIZER_POSIX && !SANITIZER_MAC + EXPECT_EQ(internal_lseek(fd, 0, SEEK_END), 0u); +#endif uptr bytes_written = 0; EXPECT_TRUE(WriteToFile(fd, str1, len1, &bytes_written)); EXPECT_EQ(len1, bytes_written); @@ -142,15 +161,67 @@ TEST(SanitizerCommon, FileOps) { EXPECT_EQ(0, internal_memcmp(buf, str2, len2)); CloseFile(fd); -#if SANITIZER_WINDOWS - // No sanitizer needs to delete a file on Windows yet. If we ever do, we can - // add a portable wrapper and test it from here. 
- ::DeleteFileA(&tmpfile[0]); -#else - internal_unlink(tmpfile); -#endif + Unlink(tmpfile); } +class SanitizerCommonFileTest : public ::testing::TestWithParam<uptr> { + void SetUp() override { + data_.resize(GetParam()); + std::generate(data_.begin(), data_.end(), [] { + return rand() % 256; // NOLINT + }); + + temp_file_name(file_name_, sizeof(file_name_), + "sanitizer_common.ReadFile.tmp."); + + std::ofstream f(file_name_, std::ios::out | std::ios::binary); + if (!data_.empty()) + f.write(data_.data(), data_.size()); + } + + void TearDown() override { Unlink(file_name_); } + + protected: + char file_name_[256]; + std::vector<char> data_; +}; + +TEST_P(SanitizerCommonFileTest, ReadFileToBuffer) { + char *buff; + uptr size; + uptr len; + EXPECT_TRUE(ReadFileToBuffer(file_name_, &buff, &len, &size)); + EXPECT_EQ(data_, std::vector<char>(buff, buff + size)); + UnmapOrDie(buff, len); +} + +TEST_P(SanitizerCommonFileTest, ReadFileToBufferHalf) { + char *buff; + uptr size; + uptr len; + data_.resize(data_.size() / 2); + EXPECT_TRUE(ReadFileToBuffer(file_name_, &buff, &len, &size, data_.size())); + EXPECT_EQ(data_, std::vector<char>(buff, buff + size)); + UnmapOrDie(buff, len); +} + +TEST_P(SanitizerCommonFileTest, ReadFileToVector) { + InternalMmapVector<char> buff; + EXPECT_TRUE(ReadFileToVector(file_name_, &buff)); + EXPECT_EQ(data_, std::vector<char>(buff.begin(), buff.end())); +} + +TEST_P(SanitizerCommonFileTest, ReadFileToVectorHalf) { + InternalMmapVector<char> buff; + data_.resize(data_.size() / 2); + EXPECT_TRUE(ReadFileToVector(file_name_, &buff, data_.size())); + EXPECT_EQ(data_, std::vector<char>(buff.begin(), buff.end())); +} + +INSTANTIATE_TEST_CASE_P(FileSizes, SanitizerCommonFileTest, + ::testing::Values(0, 1, 7, 13, 32, 4096, 4097, 1048575, + 1048576, 1048577)); + static const size_t kStrlcpyBufSize = 8; void test_internal_strlcpy(char *dbuf, const char *sbuf) { uptr retval = 0; diff --git a/lib/sanitizer_common/tests/sanitizer_linux_test.cc 
b/lib/sanitizer_common/tests/sanitizer_linux_test.cc index 8a6afab65adb..fbac9cc1438e 100644 --- a/lib/sanitizer_common/tests/sanitizer_linux_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_linux_test.cc @@ -45,7 +45,7 @@ struct TidReporterArgument { pthread_cond_destroy(&tid_reported_cond); } - pid_t reported_tid; + tid_t reported_tid; // For signaling to spawned threads that they should terminate. pthread_cond_t terminate_thread_cond; pthread_mutex_t terminate_thread_mutex; @@ -64,7 +64,7 @@ class ThreadListerTest : public ::testing::Test { protected: virtual void SetUp() { pthread_t pthread_id; - pid_t tid; + tid_t tid; for (uptr i = 0; i < kThreadCount; i++) { SpawnTidReporter(&pthread_id, &tid); pthread_ids_.push_back(pthread_id); @@ -81,12 +81,12 @@ class ThreadListerTest : public ::testing::Test { pthread_join(pthread_ids_[i], NULL); } - void SpawnTidReporter(pthread_t *pthread_id, pid_t *tid); + void SpawnTidReporter(pthread_t *pthread_id, tid_t *tid); static const uptr kThreadCount = 20; std::vector<pthread_t> pthread_ids_; - std::vector<pid_t> tids_; + std::vector<tid_t> tids_; TidReporterArgument thread_arg; }; @@ -107,72 +107,66 @@ void *TidReporterThread(void *argument) { return NULL; } -void ThreadListerTest::SpawnTidReporter(pthread_t *pthread_id, - pid_t *tid) { +void ThreadListerTest::SpawnTidReporter(pthread_t *pthread_id, tid_t *tid) { pthread_mutex_lock(&thread_arg.tid_reported_mutex); thread_arg.reported_tid = -1; ASSERT_EQ(0, pthread_create(pthread_id, NULL, TidReporterThread, &thread_arg)); - while (thread_arg.reported_tid == -1) + while (thread_arg.reported_tid == (tid_t)(-1)) pthread_cond_wait(&thread_arg.tid_reported_cond, &thread_arg.tid_reported_mutex); pthread_mutex_unlock(&thread_arg.tid_reported_mutex); *tid = thread_arg.reported_tid; } -static std::vector<pid_t> ReadTidsToVector(ThreadLister *thread_lister) { - std::vector<pid_t> listed_tids; - pid_t tid; - while ((tid = thread_lister->GetNextTID()) >= 0) - 
listed_tids.push_back(tid); - EXPECT_FALSE(thread_lister->error()); - return listed_tids; +static std::vector<tid_t> ReadTidsToVector(ThreadLister *thread_lister) { + std::vector<tid_t> listed_tids; + InternalMmapVector<tid_t> threads(128); + EXPECT_TRUE(thread_lister->ListThreads(&threads)); + return std::vector<tid_t>(threads.begin(), threads.end()); } -static bool Includes(std::vector<pid_t> first, std::vector<pid_t> second) { +static bool Includes(std::vector<tid_t> first, std::vector<tid_t> second) { std::sort(first.begin(), first.end()); std::sort(second.begin(), second.end()); return std::includes(first.begin(), first.end(), second.begin(), second.end()); } -static bool HasElement(std::vector<pid_t> vector, pid_t element) { +static bool HasElement(const std::vector<tid_t> &vector, tid_t element) { return std::find(vector.begin(), vector.end(), element) != vector.end(); } // ThreadLister's output should include the current thread's TID and the TID of // every thread we spawned. TEST_F(ThreadListerTest, ThreadListerSeesAllSpawnedThreads) { - pid_t self_tid = GetTid(); + tid_t self_tid = GetTid(); ThreadLister thread_lister(getpid()); - std::vector<pid_t> listed_tids = ReadTidsToVector(&thread_lister); + std::vector<tid_t> listed_tids = ReadTidsToVector(&thread_lister); ASSERT_TRUE(HasElement(listed_tids, self_tid)); ASSERT_TRUE(Includes(listed_tids, tids_)); } -// Calling Reset() should not cause ThreadLister to forget any threads it's -// supposed to know about. -TEST_F(ThreadListerTest, ResetDoesNotForgetThreads) { +TEST_F(ThreadListerTest, DoNotForgetThreads) { ThreadLister thread_lister(getpid()); - // Run the loop body twice, because Reset() might behave differently if called - // on a freshly created object. + // Run the loop body twice, because ThreadLister might behave differently if + // called on a freshly created object. 
for (uptr i = 0; i < 2; i++) { - thread_lister.Reset(); - std::vector<pid_t> listed_tids = ReadTidsToVector(&thread_lister); + std::vector<tid_t> listed_tids = ReadTidsToVector(&thread_lister); ASSERT_TRUE(Includes(listed_tids, tids_)); } } // If new threads have spawned during ThreadLister object's lifetime, calling -// Reset() should cause ThreadLister to recognize their existence. -TEST_F(ThreadListerTest, ResetMakesNewThreadsKnown) { +// relisting should cause ThreadLister to recognize their existence. +TEST_F(ThreadListerTest, NewThreads) { ThreadLister thread_lister(getpid()); - std::vector<pid_t> threads_before_extra = ReadTidsToVector(&thread_lister); + std::vector<tid_t> threads_before_extra = ReadTidsToVector(&thread_lister); pthread_t extra_pthread_id; - pid_t extra_tid; + tid_t extra_tid; SpawnTidReporter(&extra_pthread_id, &extra_tid); // Register the new thread so it gets terminated in TearDown(). pthread_ids_.push_back(extra_pthread_id); @@ -182,9 +176,7 @@ TEST_F(ThreadListerTest, ResetMakesNewThreadsKnown) { // so better check for that. 
ASSERT_FALSE(HasElement(threads_before_extra, extra_tid)); - thread_lister.Reset(); - - std::vector<pid_t> threads_after_extra = ReadTidsToVector(&thread_lister); + std::vector<tid_t> threads_after_extra = ReadTidsToVector(&thread_lister); ASSERT_TRUE(HasElement(threads_after_extra, extra_tid)); } diff --git a/lib/sanitizer_common/tests/sanitizer_printf_test.cc b/lib/sanitizer_common/tests/sanitizer_printf_test.cc index 5a77b470caf5..75fe6667864f 100644 --- a/lib/sanitizer_common/tests/sanitizer_printf_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_printf_test.cc @@ -148,6 +148,14 @@ TEST(Printf, Precision) { len = internal_snprintf(buf, sizeof(buf), "%.*s", 6, "12345"); EXPECT_EQ(5U, len); EXPECT_STREQ("12345", buf); + len = internal_snprintf(buf, sizeof(buf), "%-6s", "12345"); + EXPECT_EQ(6U, len); + EXPECT_STREQ("12345 ", buf); + // Check that width does not overflow the smaller buffer, although + // 10 chars is requested, it stops at the buffer size, 8. + len = internal_snprintf(buf, 8, "%-10s", "12345"); + EXPECT_EQ(10U, len); // The required size reported. + EXPECT_STREQ("12345 ", buf); } } // namespace __sanitizer diff --git a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc index 4ac55c706d6c..22052d9a4f59 100644 --- a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc @@ -37,7 +37,8 @@ TEST(MemoryMappingLayout, DumpListOfModules) { const char *binary_name = last_slash ? 
last_slash + 1 : argv0; MemoryMappingLayout memory_mapping(false); const uptr kMaxModules = 100; - InternalMmapVector<LoadedModule> modules(kMaxModules); + InternalMmapVector<LoadedModule> modules; + modules.reserve(kMaxModules); memory_mapping.DumpListOfModules(&modules); EXPECT_GT(modules.size(), 0U); bool found = false; @@ -56,7 +57,8 @@ TEST(MemoryMapping, LoadedModuleArchAndUUID) { if (SANITIZER_MAC) { MemoryMappingLayout memory_mapping(false); const uptr kMaxModules = 100; - InternalMmapVector<LoadedModule> modules(kMaxModules); + InternalMmapVector<LoadedModule> modules; + modules.reserve(kMaxModules); memory_mapping.DumpListOfModules(&modules); for (uptr i = 0; i < modules.size(); ++i) { ModuleArch arch = modules[i].arch(); diff --git a/lib/sanitizer_common/tests/sanitizer_test_utils.h b/lib/sanitizer_common/tests/sanitizer_test_utils.h index f8821a15d9b9..5c1f8ad48864 100644 --- a/lib/sanitizer_common/tests/sanitizer_test_utils.h +++ b/lib/sanitizer_common/tests/sanitizer_test_utils.h @@ -104,10 +104,16 @@ static inline uint32_t my_rand() { #if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__ANDROID__) && \ !defined(__NetBSD__) && !defined(_WIN32) # define SANITIZER_TEST_HAS_MEMALIGN 1 +#else +# define SANITIZER_TEST_HAS_MEMALIGN 0 +#endif + +#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__ANDROID__) && \ + !defined(__NetBSD__) && !defined(_WIN32) && \ + !(defined(__sun__) && defined(__svr4__)) # define SANITIZER_TEST_HAS_PVALLOC 1 # define SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE 1 #else -# define SANITIZER_TEST_HAS_MEMALIGN 0 # define SANITIZER_TEST_HAS_PVALLOC 0 # define SANITIZER_TEST_HAS_MALLOC_USABLE_SIZE 0 #endif diff --git a/lib/sanitizer_common/tests/sanitizer_vector_test.cc b/lib/sanitizer_common/tests/sanitizer_vector_test.cc index 33ed14e190c5..59fbf39685bd 100644 --- a/lib/sanitizer_common/tests/sanitizer_vector_test.cc +++ b/lib/sanitizer_common/tests/sanitizer_vector_test.cc @@ -17,12 +17,12 @@ namespace __sanitizer { 
TEST(Vector, Basic) { Vector<int> v; - EXPECT_EQ(v.Size(), (uptr)0); + EXPECT_EQ(v.Size(), 0u); v.PushBack(42); - EXPECT_EQ(v.Size(), (uptr)1); + EXPECT_EQ(v.Size(), 1u); EXPECT_EQ(v[0], 42); v.PushBack(43); - EXPECT_EQ(v.Size(), (uptr)2); + EXPECT_EQ(v.Size(), 2u); EXPECT_EQ(v[0], 42); EXPECT_EQ(v[1], 43); } @@ -31,7 +31,7 @@ TEST(Vector, Stride) { Vector<int> v; for (int i = 0; i < 1000; i++) { v.PushBack(i); - EXPECT_EQ(v.Size(), (uptr)(i + 1)); + EXPECT_EQ(v.Size(), i + 1u); EXPECT_EQ(v[i], i); } for (int i = 0; i < 1000; i++) { @@ -39,4 +39,13 @@ TEST(Vector, Stride) { } } +TEST(Vector, ResizeReduction) { + Vector<int> v; + v.PushBack(0); + v.PushBack(0); + EXPECT_EQ(v.Size(), 2u); + v.Resize(1); + EXPECT_EQ(v.Size(), 1u); +} + } // namespace __sanitizer diff --git a/lib/scudo/CMakeLists.txt b/lib/scudo/CMakeLists.txt index 4d26a3477feb..0646c3dd4f53 100644 --- a/lib/scudo/CMakeLists.txt +++ b/lib/scudo/CMakeLists.txt @@ -7,11 +7,41 @@ set(SCUDO_CFLAGS ${SANITIZER_COMMON_CFLAGS}) list(APPEND SCUDO_CFLAGS -fbuiltin) append_rtti_flag(OFF SCUDO_CFLAGS) +set(SCUDO_MINIMAL_DYNAMIC_LIBS ${SANITIZER_COMMON_LINK_LIBS}) +append_list_if(COMPILER_RT_HAS_LIBDL dl SCUDO_MINIMAL_DYNAMIC_LIBS) +append_list_if(COMPILER_RT_HAS_LIBRT rt SCUDO_MINIMAL_DYNAMIC_LIBS) +append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread SCUDO_MINIMAL_DYNAMIC_LIBS) +append_list_if(COMPILER_RT_HAS_LIBLOG log SCUDO_MINIMAL_DYNAMIC_LIBS) + +set(SCUDO_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS}) +# Use gc-sections by default to avoid unused code being pulled in. +list(APPEND SCUDO_DYNAMIC_LINK_FLAGS -Wl,--gc-sections) + +# The minimal Scudo runtime does not inlude the UBSan runtime. 
+set(SCUDO_MINIMAL_OBJECT_LIBS + RTSanitizerCommonNoTermination + RTSanitizerCommonLibc + RTInterception) +set(SCUDO_OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS}) +set(SCUDO_DYNAMIC_LIBS ${SCUDO_MINIMAL_DYNAMIC_LIBS}) + +if (FUCHSIA) + list(APPEND SCUDO_CFLAGS -nostdinc++) + list(APPEND SCUDO_DYNAMIC_LINK_FLAGS -nostdlib++) +else() + list(APPEND SCUDO_DYNAMIC_LIBS ${SANITIZER_CXX_ABI_LIBRARY}) + list(APPEND SCUDO_OBJECT_LIBS + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer + RTUbsan) +endif() + set(SCUDO_SOURCES scudo_allocator.cpp - scudo_flags.cpp scudo_crc32.cpp - scudo_interceptors.cpp + scudo_errors.cpp + scudo_flags.cpp + scudo_malloc.cpp scudo_termination.cpp scudo_tsd_exclusive.cpp scudo_tsd_shared.cpp @@ -20,6 +50,21 @@ set(SCUDO_SOURCES set(SCUDO_CXX_SOURCES scudo_new_delete.cpp) +set(SCUDO_HEADERS + scudo_allocator.h + scudo_allocator_combined.h + scudo_allocator_secondary.h + scudo_crc32.h + scudo_errors.h + scudo_flags.h + scudo_flags.inc + scudo_interface_internal.h + scudo_platform.h + scudo_tsd.h + scudo_tsd_exclusive.inc + scudo_tsd_shared.inc + scudo_utils.h) + # Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available. 
if (COMPILER_RT_HAS_MSSE4_2_FLAG) set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -msse4.2) @@ -32,41 +77,58 @@ if (COMPILER_RT_HAS_MCRC_FLAG) endif() if(COMPILER_RT_HAS_SCUDO) - set(SCUDO_DYNAMIC_LIBS ${SANITIZER_COMMON_LINK_LIBS}) - append_list_if(COMPILER_RT_HAS_LIBDL dl SCUDO_DYNAMIC_LIBS) - append_list_if(COMPILER_RT_HAS_LIBRT rt SCUDO_DYNAMIC_LIBS) - append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread SCUDO_DYNAMIC_LIBS) - append_list_if(COMPILER_RT_HAS_LIBLOG log SCUDO_DYNAMIC_LIBS) + add_compiler_rt_runtime(clang_rt.scudo_minimal + STATIC + ARCHS ${SCUDO_SUPPORTED_ARCH} + SOURCES ${SCUDO_SOURCES} + ADDITIONAL_HEADERS ${SCUDO_HEADERS} + OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS} + CFLAGS ${SCUDO_CFLAGS} + PARENT_TARGET scudo) + add_compiler_rt_runtime(clang_rt.scudo_cxx_minimal + STATIC + ARCHS ${SCUDO_SUPPORTED_ARCH} + SOURCES ${SCUDO_CXX_SOURCES} + ADDITIONAL_HEADERS ${SCUDO_HEADERS} + CFLAGS ${SCUDO_CFLAGS} + PARENT_TARGET scudo) add_compiler_rt_runtime(clang_rt.scudo STATIC ARCHS ${SCUDO_SUPPORTED_ARCH} SOURCES ${SCUDO_SOURCES} - OBJECT_LIBS RTSanitizerCommonNoTermination - RTSanitizerCommonLibc - RTInterception - RTUbsan + ADDITIONAL_HEADERS ${SCUDO_HEADERS} + OBJECT_LIBS ${SCUDO_OBJECT_LIBS} CFLAGS ${SCUDO_CFLAGS} PARENT_TARGET scudo) - add_compiler_rt_runtime(clang_rt.scudo_cxx STATIC ARCHS ${SCUDO_SUPPORTED_ARCH} SOURCES ${SCUDO_CXX_SOURCES} + ADDITIONAL_HEADERS ${SCUDO_HEADERS} OBJECT_LIBS RTUbsan_cxx CFLAGS ${SCUDO_CFLAGS} PARENT_TARGET scudo) + add_compiler_rt_runtime(clang_rt.scudo_minimal + SHARED + ARCHS ${SCUDO_SUPPORTED_ARCH} + SOURCES ${SCUDO_SOURCES} ${SCUDO_CXX_SOURCES} + ADDITIONAL_HEADERS ${SCUDO_HEADERS} + OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS} + CFLAGS ${SCUDO_CFLAGS} + LINK_FLAGS ${SCUDO_DYNAMIC_LINK_FLAGS} + LINK_LIBS ${SCUDO_MINIMAL_DYNAMIC_LIBS} + PARENT_TARGET scudo) + add_compiler_rt_runtime(clang_rt.scudo SHARED ARCHS ${SCUDO_SUPPORTED_ARCH} SOURCES ${SCUDO_SOURCES} ${SCUDO_CXX_SOURCES} - OBJECT_LIBS 
RTSanitizerCommonNoTermination - RTSanitizerCommonLibc - RTInterception - RTUbsan - RTUbsan_cxx + ADDITIONAL_HEADERS ${SCUDO_HEADERS} + OBJECT_LIBS ${SCUDO_OBJECT_LIBS} CFLAGS ${SCUDO_CFLAGS} + LINK_FLAGS ${SCUDO_DYNAMIC_LINK_FLAGS} LINK_LIBS ${SCUDO_DYNAMIC_LIBS} PARENT_TARGET scudo) endif() diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp index e5a4d714c66e..4a11bf5fcc21 100644 --- a/lib/scudo/scudo_allocator.cpp +++ b/lib/scudo/scudo_allocator.cpp @@ -16,7 +16,9 @@ #include "scudo_allocator.h" #include "scudo_crc32.h" +#include "scudo_errors.h" #include "scudo_flags.h" +#include "scudo_interface_internal.h" #include "scudo_tsd.h" #include "scudo_utils.h" @@ -60,40 +62,49 @@ INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) { #endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32) } -static ScudoBackendAllocator &getBackendAllocator(); +static BackendT &getBackend(); namespace Chunk { - // We can't use the offset member of the chunk itself, as we would double - // fetch it without any warranty that it wouldn't have been tampered. To - // prevent this, we work with a local copy of the header. 
- static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) { - return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) - - AlignedChunkHeaderSize - - (Header->Offset << MinAlignmentLog)); - } - static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) { return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) - - AlignedChunkHeaderSize); + getHeaderSize()); } static INLINE const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) { return reinterpret_cast<const AtomicPackedHeader *>( - reinterpret_cast<uptr>(Ptr) - AlignedChunkHeaderSize); + reinterpret_cast<uptr>(Ptr) - getHeaderSize()); } static INLINE bool isAligned(const void *Ptr) { return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment); } + // We can't use the offset member of the chunk itself, as we would double + // fetch it without any warranty that it wouldn't have been tampered. To + // prevent this, we work with a local copy of the header. + static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) { + return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) - + getHeaderSize() - (Header->Offset << MinAlignmentLog)); + } + // Returns the usable size for a chunk, meaning the amount of bytes from the // beginning of the user data to the end of the backend allocated chunk. static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) { - const uptr Size = getBackendAllocator().getActuallyAllocatedSize( - getBackendPtr(Ptr, Header), Header->ClassId); - if (Size == 0) - return 0; - return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog); + const uptr ClassId = Header->ClassId; + if (ClassId) + return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() - + (Header->Offset << MinAlignmentLog); + return SecondaryT::GetActuallyAllocatedSize( + getBackendPtr(Ptr, Header)) - getHeaderSize(); + } + + // Returns the size the user requested when allocating the chunk. 
+ static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) { + const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes; + if (Header->ClassId) + return SizeOrUnusedBytes; + return SecondaryT::GetActuallyAllocatedSize( + getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes; } // Compute the checksum of the chunk pointer and its header. @@ -136,9 +147,8 @@ namespace Chunk { atomic_load_relaxed(getConstAtomicHeader(Ptr)); *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader); if (UNLIKELY(NewUnpackedHeader->Checksum != - computeChecksum(Ptr, NewUnpackedHeader))) { - dieWithMessage("ERROR: corrupted chunk header at address %p\n", Ptr); - } + computeChecksum(Ptr, NewUnpackedHeader))) + dieWithMessage("corrupted chunk header at address %p\n", Ptr); } // Packs and stores the header, computing the checksum in the process. @@ -159,14 +169,13 @@ namespace Chunk { PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader); if (UNLIKELY(!atomic_compare_exchange_strong( getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader, - memory_order_relaxed))) { - dieWithMessage("ERROR: race on chunk header at address %p\n", Ptr); - } + memory_order_relaxed))) + dieWithMessage("race on chunk header at address %p\n", Ptr); } } // namespace Chunk struct QuarantineCallback { - explicit QuarantineCallback(AllocatorCache *Cache) + explicit QuarantineCallback(AllocatorCacheT *Cache) : Cache_(Cache) {} // Chunk recycling function, returns a quarantined chunk to the backend, @@ -174,53 +183,48 @@ struct QuarantineCallback { void Recycle(void *Ptr) { UnpackedHeader Header; Chunk::loadHeader(Ptr, &Header); - if (UNLIKELY(Header.State != ChunkQuarantine)) { - dieWithMessage("ERROR: invalid chunk state when recycling address %p\n", - Ptr); - } + if (UNLIKELY(Header.State != ChunkQuarantine)) + dieWithMessage("invalid chunk state when recycling address %p\n", Ptr); Chunk::eraseHeader(Ptr); void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header); if 
(Header.ClassId) - getBackendAllocator().deallocatePrimary(Cache_, BackendPtr, - Header.ClassId); + getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId); else - getBackendAllocator().deallocateSecondary(BackendPtr); + getBackend().deallocateSecondary(BackendPtr); } // Internal quarantine allocation and deallocation functions. We first check // that the batches are indeed serviced by the Primary. // TODO(kostyak): figure out the best way to protect the batches. void *Allocate(uptr Size) { - return getBackendAllocator().allocatePrimary(Cache_, BatchClassId); + const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch)); + return getBackend().allocatePrimary(Cache_, BatchClassId); } void Deallocate(void *Ptr) { - getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId); + const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch)); + getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId); } - AllocatorCache *Cache_; + AllocatorCacheT *Cache_; COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize); - const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch)); }; -typedef Quarantine<QuarantineCallback, void> ScudoQuarantine; -typedef ScudoQuarantine::Cache ScudoQuarantineCache; -COMPILER_CHECK(sizeof(ScudoQuarantineCache) <= +typedef Quarantine<QuarantineCallback, void> QuarantineT; +typedef QuarantineT::Cache QuarantineCacheT; +COMPILER_CHECK(sizeof(QuarantineCacheT) <= sizeof(ScudoTSD::QuarantineCachePlaceHolder)); -ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) { - return reinterpret_cast<ScudoQuarantineCache *>( - TSD->QuarantineCachePlaceHolder); +QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) { + return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder); } -struct ScudoAllocator { +struct Allocator { static const uptr MaxAllowedMallocSize = FIRST_32_SECOND_64(2UL << 30, 1ULL << 40); - typedef ReturnNullOrDieOnFailure FailureHandler; - - 
ScudoBackendAllocator BackendAllocator; - ScudoQuarantine AllocatorQuarantine; + BackendT Backend; + QuarantineT Quarantine; u32 QuarantineChunksUpToSize; @@ -234,49 +238,16 @@ struct ScudoAllocator { atomic_uint8_t RssLimitExceeded; atomic_uint64_t RssLastCheckedAtNS; - explicit ScudoAllocator(LinkerInitialized) - : AllocatorQuarantine(LINKER_INITIALIZED) {} - - void performSanityChecks() { - // Verify that the header offset field can hold the maximum offset. In the - // case of the Secondary allocator, it takes care of alignment and the - // offset will always be 0. In the case of the Primary, the worst case - // scenario happens in the last size class, when the backend allocation - // would already be aligned on the requested alignment, which would happen - // to be the maximum alignment that would fit in that size class. As a - // result, the maximum offset will be at most the maximum alignment for the - // last size class minus the header size, in multiples of MinAlignment. - UnpackedHeader Header = {}; - const uptr MaxPrimaryAlignment = - 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment); - const uptr MaxOffset = - (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog; - Header.Offset = MaxOffset; - if (Header.Offset != MaxOffset) { - dieWithMessage("ERROR: the maximum possible offset doesn't fit in the " - "header\n"); - } - // Verify that we can fit the maximum size or amount of unused bytes in the - // header. Given that the Secondary fits the allocation to a page, the worst - // case scenario happens in the Primary. It will depend on the second to - // last and last class sizes, as well as the dynamic base for the Primary. - // The following is an over-approximation that works for our needs. 
- const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1; - Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes; - if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) { - dieWithMessage("ERROR: the maximum possible unused bytes doesn't fit in " - "the header\n"); - } + explicit Allocator(LinkerInitialized) + : Quarantine(LINKER_INITIALIZED) {} - const uptr LargestClassId = SizeClassMap::kLargestClassID; - Header.ClassId = LargestClassId; - if (Header.ClassId != LargestClassId) { - dieWithMessage("ERROR: the largest class ID doesn't fit in the header\n"); - } - } + NOINLINE void performSanityChecks(); void init() { SanitizerToolName = "Scudo"; + PrimaryAllocatorName = "ScudoPrimary"; + SecondaryAllocatorName = "ScudoSecondary"; + initFlags(); performSanityChecks(); @@ -287,10 +258,10 @@ struct ScudoAllocator { atomic_store_relaxed(&HashAlgorithm, CRC32Hardware); SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); - BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms); + Backend.init(common_flags()->allocator_release_to_os_interval_ms); HardRssLimitMb = common_flags()->hard_rss_limit_mb; SoftRssLimitMb = common_flags()->soft_rss_limit_mb; - AllocatorQuarantine.Init( + Quarantine.Init( static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10, static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10); QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize; @@ -319,62 +290,36 @@ struct ScudoAllocator { return Chunk::isValid(Ptr); } - // Opportunistic RSS limit check. This will update the RSS limit status, if - // it can, every 100ms, otherwise it will just return the current one. 
- bool isRssLimitExceeded() { - u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS); - const u64 CurrentCheck = MonotonicNanoTime(); - if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL))) - return atomic_load_relaxed(&RssLimitExceeded); - if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck, - CurrentCheck, memory_order_relaxed)) - return atomic_load_relaxed(&RssLimitExceeded); - // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the - // RSS from /proc/self/statm by default. We might want to - // call getrusage directly, even if it's less accurate. - const uptr CurrentRssMb = GetRSS() >> 20; - if (HardRssLimitMb && HardRssLimitMb < CurrentRssMb) { - Report("%s: hard RSS limit exhausted (%zdMb vs %zdMb)\n", - SanitizerToolName, HardRssLimitMb, CurrentRssMb); - DumpProcessMap(); - Die(); - } - if (SoftRssLimitMb) { - if (atomic_load_relaxed(&RssLimitExceeded)) { - if (CurrentRssMb <= SoftRssLimitMb) - atomic_store_relaxed(&RssLimitExceeded, false); - } else { - if (CurrentRssMb > SoftRssLimitMb) { - atomic_store_relaxed(&RssLimitExceeded, true); - Report("%s: soft RSS limit exhausted (%zdMb vs %zdMb)\n", - SanitizerToolName, SoftRssLimitMb, CurrentRssMb); - } - } - } - return atomic_load_relaxed(&RssLimitExceeded); - } + NOINLINE bool isRssLimitExceeded(); // Allocates a chunk. 
void *allocate(uptr Size, uptr Alignment, AllocType Type, bool ForceZeroContents = false) { initThreadMaybe(); - if (UNLIKELY(Alignment > MaxAlignment)) - return FailureHandler::OnBadRequest(); + if (UNLIKELY(Alignment > MaxAlignment)) { + if (AllocatorMayReturnNull()) + return nullptr; + reportAllocationAlignmentTooBig(Alignment, MaxAlignment); + } if (UNLIKELY(Alignment < MinAlignment)) Alignment = MinAlignment; - if (UNLIKELY(Size >= MaxAllowedMallocSize)) - return FailureHandler::OnBadRequest(); - if (UNLIKELY(Size == 0)) - Size = 1; - uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize; - uptr AlignedSize = (Alignment > MinAlignment) ? - NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize; - if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) - return FailureHandler::OnBadRequest(); + const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) + + Chunk::getHeaderSize(); + const uptr AlignedSize = (Alignment > MinAlignment) ? + NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize; + if (UNLIKELY(Size >= MaxAllowedMallocSize) || + UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) { + if (AllocatorMayReturnNull()) + return nullptr; + reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize); + } - if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) - return FailureHandler::OnOOM(); + if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) { + if (AllocatorMayReturnNull()) + return nullptr; + reportRssLimitExceeded(); + } // Primary and Secondary backed allocations have a different treatment. 
We // deal with alignment requirements of Primary serviced allocations here, @@ -382,27 +327,32 @@ struct ScudoAllocator { void *BackendPtr; uptr BackendSize; u8 ClassId; - if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) { + if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) { BackendSize = AlignedSize; ClassId = SizeClassMap::ClassID(BackendSize); - ScudoTSD *TSD = getTSDAndLock(); - BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId); - TSD->unlock(); + bool UnlockRequired; + ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); + BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId); + if (UnlockRequired) + TSD->unlock(); } else { BackendSize = NeededSize; ClassId = 0; - BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment); + BackendPtr = Backend.allocateSecondary(BackendSize, Alignment); + } + if (UNLIKELY(!BackendPtr)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + reportOutOfMemory(Size); } - if (UNLIKELY(!BackendPtr)) - return FailureHandler::OnOOM(); // If requested, we will zero out the entire contents of the returned chunk. if ((ForceZeroContents || ZeroContents) && ClassId) - memset(BackendPtr, 0, - BackendAllocator.getActuallyAllocatedSize(BackendPtr, ClassId)); + memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId)); UnpackedHeader Header = {}; - uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + AlignedChunkHeaderSize; + uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize(); if (UNLIKELY(!IsAligned(UserPtr, Alignment))) { // Since the Secondary takes care of alignment, a non-aligned pointer // means it is from the Primary. 
It is also the only case where the offset @@ -412,7 +362,7 @@ struct ScudoAllocator { Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog; UserPtr = AlignedUserPtr; } - CHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize); + DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize); Header.State = ChunkAllocated; Header.AllocType = Type; if (ClassId) { @@ -429,7 +379,8 @@ struct ScudoAllocator { } void *Ptr = reinterpret_cast<void *>(UserPtr); Chunk::storeHeader(Ptr, &Header); - // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(Ptr, Size); + if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook) + __sanitizer_malloc_hook(Ptr, Size); return Ptr; } @@ -438,18 +389,20 @@ struct ScudoAllocator { // quarantine chunk size threshold. void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, uptr Size) { - const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) || + const bool BypassQuarantine = (Quarantine.GetCacheSize() == 0) || (Size > QuarantineChunksUpToSize); if (BypassQuarantine) { Chunk::eraseHeader(Ptr); void *BackendPtr = Chunk::getBackendPtr(Ptr, Header); if (Header->ClassId) { - ScudoTSD *TSD = getTSDAndLock(); - getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr, - Header->ClassId); - TSD->unlock(); + bool UnlockRequired; + ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); + getBackend().deallocatePrimary(&TSD->Cache, BackendPtr, + Header->ClassId); + if (UnlockRequired) + TSD->unlock(); } else { - getBackendAllocator().deallocateSecondary(BackendPtr); + getBackend().deallocateSecondary(BackendPtr); } } else { // If a small memory amount was allocated with a larger alignment, we want @@ -457,21 +410,23 @@ struct ScudoAllocator { // with tiny chunks, taking a lot of VA memory. This is an approximation // of the usable size, that allows us to not call // GetActuallyAllocatedSize. 
- uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog); + const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog); UnpackedHeader NewHeader = *Header; NewHeader.State = ChunkQuarantine; Chunk::compareExchangeHeader(Ptr, &NewHeader, Header); - ScudoTSD *TSD = getTSDAndLock(); - AllocatorQuarantine.Put(getQuarantineCache(TSD), - QuarantineCallback(&TSD->Cache), Ptr, - EstimatedSize); - TSD->unlock(); + bool UnlockRequired; + ScudoTSD *TSD = getTSDAndLock(&UnlockRequired); + Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache), + Ptr, EstimatedSize); + if (UnlockRequired) + TSD->unlock(); } } // Deallocates a Chunk, which means either adding it to the quarantine or // directly returning it to the backend if criteria are met. - void deallocate(void *Ptr, uptr DeleteSize, AllocType Type) { + void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment, + AllocType Type) { // For a deallocation, we only ensure minimal initialization, meaning thread // local data will be left uninitialized for now (when using ELF TLS). The // fallback cache will be used instead. This is a workaround for a situation @@ -479,37 +434,32 @@ struct ScudoAllocator { // the TLS destructors, ending up in initialized thread specific data never // being destroyed properly. Any other heap operation will do a full init. 
initThreadMaybe(/*MinimalInit=*/true); - // if (&__sanitizer_free_hook) __sanitizer_free_hook(Ptr); + if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook) + __sanitizer_free_hook(Ptr); if (UNLIKELY(!Ptr)) return; - if (UNLIKELY(!Chunk::isAligned(Ptr))) { - dieWithMessage("ERROR: attempted to deallocate a chunk not properly " - "aligned at address %p\n", Ptr); - } + if (UNLIKELY(!Chunk::isAligned(Ptr))) + dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr); UnpackedHeader Header; Chunk::loadHeader(Ptr, &Header); - if (UNLIKELY(Header.State != ChunkAllocated)) { - dieWithMessage("ERROR: invalid chunk state when deallocating address " - "%p\n", Ptr); - } + if (UNLIKELY(Header.State != ChunkAllocated)) + dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr); if (DeallocationTypeMismatch) { // The deallocation type has to match the allocation one. if (Header.AllocType != Type) { // With the exception of memalign'd Chunks, that can be still be free'd. - if (Header.AllocType != FromMemalign || Type != FromMalloc) { - dieWithMessage("ERROR: allocation type mismatch when deallocating " - "address %p\n", Ptr); - } + if (Header.AllocType != FromMemalign || Type != FromMalloc) + dieWithMessage("allocation type mismatch when deallocating address " + "%p\n", Ptr); } } - uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes : - Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes; + const uptr Size = Chunk::getSize(Ptr, &Header); if (DeleteSizeMismatch) { - if (DeleteSize && DeleteSize != Size) { - dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n", + if (DeleteSize && DeleteSize != Size) + dieWithMessage("invalid sized delete when deallocating address %p\n", Ptr); - } } + (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches. quarantineOrDeallocateChunk(Ptr, &Header, Size); } @@ -517,21 +467,18 @@ struct ScudoAllocator { // size still fits in the chunk. 
void *reallocate(void *OldPtr, uptr NewSize) { initThreadMaybe(); - if (UNLIKELY(!Chunk::isAligned(OldPtr))) { - dieWithMessage("ERROR: attempted to reallocate a chunk not properly " - "aligned at address %p\n", OldPtr); - } + if (UNLIKELY(!Chunk::isAligned(OldPtr))) + dieWithMessage("misaligned address when reallocating address %p\n", + OldPtr); UnpackedHeader OldHeader; Chunk::loadHeader(OldPtr, &OldHeader); - if (UNLIKELY(OldHeader.State != ChunkAllocated)) { - dieWithMessage("ERROR: invalid chunk state when reallocating address " - "%p\n", OldPtr); - } + if (UNLIKELY(OldHeader.State != ChunkAllocated)) + dieWithMessage("invalid chunk state when reallocating address %p\n", + OldPtr); if (DeallocationTypeMismatch) { - if (UNLIKELY(OldHeader.AllocType != FromMalloc)) { - dieWithMessage("ERROR: allocation type mismatch when reallocating " - "address %p\n", OldPtr); - } + if (UNLIKELY(OldHeader.AllocType != FromMalloc)) + dieWithMessage("allocation type mismatch when reallocating address " + "%p\n", OldPtr); } const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader); // The new size still fits in the current chunk, and the size difference @@ -548,7 +495,7 @@ struct ScudoAllocator { // old one. void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); if (NewPtr) { - uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes : + const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes : UsableSize - OldHeader.SizeOrUnusedBytes; memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize)); quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize); @@ -564,36 +511,36 @@ struct ScudoAllocator { UnpackedHeader Header; Chunk::loadHeader(Ptr, &Header); // Getting the usable size of a chunk only makes sense if it's allocated. 
- if (UNLIKELY(Header.State != ChunkAllocated)) { - dieWithMessage("ERROR: invalid chunk state when sizing address %p\n", - Ptr); - } + if (UNLIKELY(Header.State != ChunkAllocated)) + dieWithMessage("invalid chunk state when sizing address %p\n", Ptr); return Chunk::getUsableSize(Ptr, &Header); } void *calloc(uptr NMemB, uptr Size) { initThreadMaybe(); - if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) - return FailureHandler::OnBadRequest(); + if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) { + if (AllocatorMayReturnNull()) + return nullptr; + reportCallocOverflow(NMemB, Size); + } return allocate(NMemB * Size, MinAlignment, FromMalloc, true); } void commitBack(ScudoTSD *TSD) { - AllocatorQuarantine.Drain(getQuarantineCache(TSD), - QuarantineCallback(&TSD->Cache)); - BackendAllocator.destroyCache(&TSD->Cache); + Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache)); + Backend.destroyCache(&TSD->Cache); } uptr getStats(AllocatorStat StatType) { initThreadMaybe(); uptr stats[AllocatorStatCount]; - BackendAllocator.getStats(stats); + Backend.getStats(stats); return stats[StatType]; } - void *handleBadRequest() { + bool canReturnNull() { initThreadMaybe(); - return FailureHandler::OnBadRequest(); + return AllocatorMayReturnNull(); } void setRssLimit(uptr LimitMb, bool HardLimit) { @@ -603,21 +550,90 @@ struct ScudoAllocator { SoftRssLimitMb = LimitMb; CheckRssLimit = HardRssLimitMb || SoftRssLimitMb; } + + void printStats() { + initThreadMaybe(); + Backend.printStats(); + } }; -static ScudoAllocator Instance(LINKER_INITIALIZED); +NOINLINE void Allocator::performSanityChecks() { + // Verify that the header offset field can hold the maximum offset. In the + // case of the Secondary allocator, it takes care of alignment and the + // offset will always be 0. 
In the case of the Primary, the worst case + // scenario happens in the last size class, when the backend allocation + // would already be aligned on the requested alignment, which would happen + // to be the maximum alignment that would fit in that size class. As a + // result, the maximum offset will be at most the maximum alignment for the + // last size class minus the header size, in multiples of MinAlignment. + UnpackedHeader Header = {}; + const uptr MaxPrimaryAlignment = + 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment); + const uptr MaxOffset = + (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog; + Header.Offset = MaxOffset; + if (Header.Offset != MaxOffset) + dieWithMessage("maximum possible offset doesn't fit in header\n"); + // Verify that we can fit the maximum size or amount of unused bytes in the + // header. Given that the Secondary fits the allocation to a page, the worst + // case scenario happens in the Primary. It will depend on the second to + // last and last class sizes, as well as the dynamic base for the Primary. + // The following is an over-approximation that works for our needs. + const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1; + Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes; + if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes) + dieWithMessage("maximum possible unused bytes doesn't fit in header\n"); + + const uptr LargestClassId = SizeClassMap::kLargestClassID; + Header.ClassId = LargestClassId; + if (Header.ClassId != LargestClassId) + dieWithMessage("largest class ID doesn't fit in header\n"); +} -static ScudoBackendAllocator &getBackendAllocator() { - return Instance.BackendAllocator; +// Opportunistic RSS limit check. This will update the RSS limit status, if +// it can, every 100ms, otherwise it will just return the current one. 
+NOINLINE bool Allocator::isRssLimitExceeded() { + u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS); + const u64 CurrentCheck = MonotonicNanoTime(); + if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL))) + return atomic_load_relaxed(&RssLimitExceeded); + if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck, + CurrentCheck, memory_order_relaxed)) + return atomic_load_relaxed(&RssLimitExceeded); + // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the + // RSS from /proc/self/statm by default. We might want to + // call getrusage directly, even if it's less accurate. + const uptr CurrentRssMb = GetRSS() >> 20; + if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb)) + dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n", + HardRssLimitMb, CurrentRssMb); + if (SoftRssLimitMb) { + if (atomic_load_relaxed(&RssLimitExceeded)) { + if (CurrentRssMb <= SoftRssLimitMb) + atomic_store_relaxed(&RssLimitExceeded, false); + } else { + if (CurrentRssMb > SoftRssLimitMb) { + atomic_store_relaxed(&RssLimitExceeded, true); + Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n", + SoftRssLimitMb, CurrentRssMb); + } + } + } + return atomic_load_relaxed(&RssLimitExceeded); +} + +static Allocator Instance(LINKER_INITIALIZED); + +static BackendT &getBackend() { + return Instance.Backend; } void initScudo() { Instance.init(); } -void ScudoTSD::init(bool Shared) { - UnlockRequired = Shared; - getBackendAllocator().initCache(&Cache); +void ScudoTSD::init() { + getBackend().initCache(&Cache); memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); } @@ -625,23 +641,25 @@ void ScudoTSD::commitBack() { Instance.commitBack(this); } -void *scudoMalloc(uptr Size, AllocType Type) { - return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type)); -} - -void scudoFree(void *Ptr, AllocType Type) { - Instance.deallocate(Ptr, 0, Type); +void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) { 
+ if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) { + errno = EINVAL; + if (Instance.canReturnNull()) + return nullptr; + reportAllocationAlignmentNotPowerOfTwo(Alignment); + } + return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type)); } -void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) { - Instance.deallocate(Ptr, Size, Type); +void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) { + Instance.deallocate(Ptr, Size, Alignment, Type); } void *scudoRealloc(void *Ptr, uptr Size) { if (!Ptr) return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc)); if (Size == 0) { - Instance.deallocate(Ptr, 0, FromMalloc); + Instance.deallocate(Ptr, 0, 0, FromMalloc); return nullptr; } return SetErrnoOnNull(Instance.reallocate(Ptr, Size)); @@ -660,24 +678,19 @@ void *scudoPvalloc(uptr Size) { uptr PageSize = GetPageSizeCached(); if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) { errno = ENOMEM; - return Instance.handleBadRequest(); + if (Instance.canReturnNull()) + return nullptr; + reportPvallocOverflow(Size); } // pvalloc(0) should allocate one page. Size = Size ? 
RoundUpTo(Size, PageSize) : PageSize; return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign)); } -void *scudoMemalign(uptr Alignment, uptr Size) { - if (UNLIKELY(!IsPowerOfTwo(Alignment))) { - errno = EINVAL; - return Instance.handleBadRequest(); - } - return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign)); -} - int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) { - Instance.handleBadRequest(); + if (!Instance.canReturnNull()) + reportInvalidPosixMemalignAlignment(Alignment); return EINVAL; } void *Ptr = Instance.allocate(Size, Alignment, FromMemalign); @@ -690,7 +703,9 @@ int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { void *scudoAlignedAlloc(uptr Alignment, uptr Size) { if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) { errno = EINVAL; - return Instance.handleBadRequest(); + if (Instance.canReturnNull()) + return nullptr; + reportInvalidAlignedAllocAlignment(Size, Alignment); } return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc)); } @@ -721,8 +736,8 @@ uptr __sanitizer_get_unmapped_bytes() { return 1; } -uptr __sanitizer_get_estimated_allocated_size(uptr size) { - return size; +uptr __sanitizer_get_estimated_allocated_size(uptr Size) { + return Size; } int __sanitizer_get_ownership(const void *Ptr) { @@ -733,12 +748,26 @@ uptr __sanitizer_get_allocated_size(const void *Ptr) { return Instance.getUsableSize(Ptr); } +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, + void *Ptr, uptr Size) { + (void)Ptr; + (void)Size; +} + +SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) { + (void)Ptr; +} +#endif + // Interface functions -extern "C" { -void __scudo_set_rss_limit(unsigned long LimitMb, int HardLimit) { // NOLINT +void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) { if (!SCUDO_CAN_USE_PUBLIC_INTERFACE) return; Instance.setRssLimit(LimitMb, 
!!HardLimit); } -} // extern "C" + +void __scudo_print_stats() { + Instance.printStats(); +} diff --git a/lib/scudo/scudo_allocator.h b/lib/scudo/scudo_allocator.h index a561247def9c..0002b4a44b78 100644 --- a/lib/scudo/scudo_allocator.h +++ b/lib/scudo/scudo_allocator.h @@ -59,9 +59,17 @@ const uptr MaxAlignmentLog = 24; // 16 MB const uptr MinAlignment = 1 << MinAlignmentLog; const uptr MaxAlignment = 1 << MaxAlignmentLog; -const uptr ChunkHeaderSize = sizeof(PackedHeader); -const uptr AlignedChunkHeaderSize = - (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1); +// constexpr version of __sanitizer::RoundUp without the extraneous CHECK. +// This way we can use it in constexpr variables and functions declarations. +constexpr uptr RoundUpTo(uptr Size, uptr Boundary) { + return (Size + Boundary - 1) & ~(Boundary - 1); +} + +namespace Chunk { + constexpr uptr getHeaderSize() { + return RoundUpTo(sizeof(PackedHeader), MinAlignment); + } +} #if SANITIZER_CAN_USE_ALLOCATOR64 const uptr AllocatorSpace = ~0ULL; @@ -74,7 +82,7 @@ struct AP64 { static const uptr kFlags = SizeClassAllocator64FlagMasks::kRandomShuffleChunks; }; -typedef SizeClassAllocator64<AP64> PrimaryAllocator; +typedef SizeClassAllocator64<AP64> PrimaryT; #else static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog; # if SANITIZER_WORDSIZE == 32 @@ -94,30 +102,22 @@ struct AP32 { SizeClassAllocator32FlagMasks::kRandomShuffleChunks | SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch; }; -typedef SizeClassAllocator32<AP32> PrimaryAllocator; +typedef SizeClassAllocator32<AP32> PrimaryT; #endif // SANITIZER_CAN_USE_ALLOCATOR64 -// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own. 
-INLINE uptr RoundUpTo(uptr Size, uptr Boundary) { - return (Size + Boundary - 1) & ~(Boundary - 1); -} - #include "scudo_allocator_secondary.h" #include "scudo_allocator_combined.h" -typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; -typedef ScudoLargeMmapAllocator SecondaryAllocator; -typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache, - SecondaryAllocator> ScudoBackendAllocator; +typedef SizeClassAllocatorLocalCache<PrimaryT> AllocatorCacheT; +typedef LargeMmapAllocator SecondaryT; +typedef CombinedAllocator<PrimaryT, AllocatorCacheT, SecondaryT> BackendT; void initScudo(); -void *scudoMalloc(uptr Size, AllocType Type); -void scudoFree(void *Ptr, AllocType Type); -void scudoSizedFree(void *Ptr, uptr Size, AllocType Type); +void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type); +void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type); void *scudoRealloc(void *Ptr, uptr Size); void *scudoCalloc(uptr NMemB, uptr Size); -void *scudoMemalign(uptr Alignment, uptr Size); void *scudoValloc(uptr Size); void *scudoPvalloc(uptr Size); int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size); diff --git a/lib/scudo/scudo_allocator_combined.h b/lib/scudo/scudo_allocator_combined.h index 25e273114c23..6e40660ba9ab 100644 --- a/lib/scudo/scudo_allocator_combined.h +++ b/lib/scudo/scudo_allocator_combined.h @@ -16,12 +16,12 @@ #define SCUDO_ALLOCATOR_COMBINED_H_ #ifndef SCUDO_ALLOCATOR_H_ -#error "This file must be included inside scudo_allocator.h." +# error "This file must be included inside scudo_allocator.h." 
#endif template <class PrimaryAllocator, class AllocatorCache, class SecondaryAllocator> -class ScudoCombinedAllocator { +class CombinedAllocator { public: void init(s32 ReleaseToOSIntervalMs) { Primary.Init(ReleaseToOSIntervalMs); @@ -49,12 +49,6 @@ class ScudoCombinedAllocator { Secondary.Deallocate(&Stats, Ptr); } - uptr getActuallyAllocatedSize(void *Ptr, uptr ClassId) { - if (ClassId) - return PrimaryAllocator::ClassIdToSize(ClassId); - return Secondary.GetActuallyAllocatedSize(Ptr); - } - void initCache(AllocatorCache *Cache) { Cache->Init(&Stats); } @@ -67,6 +61,11 @@ class ScudoCombinedAllocator { Stats.Get(StatType); } + void printStats() { + Primary.PrintStats(); + Secondary.PrintStats(); + } + private: PrimaryAllocator Primary; SecondaryAllocator Secondary; diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h index f2002ed986c3..ff6246e25883 100644 --- a/lib/scudo/scudo_allocator_secondary.h +++ b/lib/scudo/scudo_allocator_secondary.h @@ -21,120 +21,173 @@ # error "This file must be included inside scudo_allocator.h." #endif -class ScudoLargeMmapAllocator { +// Secondary backed allocations are standalone chunks that contain extra +// information stored in a LargeChunk::Header prior to the frontend's header. +// +// The secondary takes care of alignment requirements (so that it can release +// unnecessary pages in the rare event of larger alignments), and as such must +// know about the frontend's header size. +// +// Since Windows doesn't support partial releasing of a reserved memory region, +// we have to keep track of both the reserved and the committed memory. 
+// +// The resulting chunk resembles the following: +// +// +--------------------+ +// | Guard page(s) | +// +--------------------+ +// | Unused space* | +// +--------------------+ +// | LargeChunk::Header | +// +--------------------+ +// | {Unp,P}ackedHeader | +// +--------------------+ +// | Data (aligned) | +// +--------------------+ +// | Unused space** | +// +--------------------+ +// | Guard page(s) | +// +--------------------+ + +namespace LargeChunk { + struct Header { + ReservedAddressRange StoredRange; + uptr CommittedSize; + uptr Size; + }; + constexpr uptr getHeaderSize() { + return RoundUpTo(sizeof(Header), MinAlignment); + } + static Header *getHeader(uptr Ptr) { + return reinterpret_cast<Header *>(Ptr - getHeaderSize()); + } + static Header *getHeader(const void *Ptr) { + return getHeader(reinterpret_cast<uptr>(Ptr)); + } +} // namespace LargeChunk + +class LargeMmapAllocator { public: void Init() { - PageSizeCached = GetPageSizeCached(); + internal_memset(this, 0, sizeof(*this)); } void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) { - const uptr UserSize = Size - AlignedChunkHeaderSize; + const uptr UserSize = Size - Chunk::getHeaderSize(); // The Scudo frontend prevents us from allocating more than // MaxAllowedMallocSize, so integer overflow checks would be superfluous. - uptr MapSize = Size + AlignedReservedAddressRangeSize; - if (Alignment > MinAlignment) - MapSize += Alignment; - const uptr PageSize = PageSizeCached; - MapSize = RoundUpTo(MapSize, PageSize); + uptr ReservedSize = Size + LargeChunk::getHeaderSize(); + if (UNLIKELY(Alignment > MinAlignment)) + ReservedSize += Alignment; + const uptr PageSize = GetPageSizeCached(); + ReservedSize = RoundUpTo(ReservedSize, PageSize); // Account for 2 guard pages, one before and one after the chunk. 
- MapSize += 2 * PageSize; + ReservedSize += 2 * PageSize; ReservedAddressRange AddressRange; - uptr MapBeg = AddressRange.Init(MapSize); - if (MapBeg == ~static_cast<uptr>(0)) - return ReturnNullOrDieOnFailure::OnOOM(); + uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName); + if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0))) + return nullptr; // A page-aligned pointer is assumed after that, so check it now. - CHECK(IsAligned(MapBeg, PageSize)); - uptr MapEnd = MapBeg + MapSize; + DCHECK(IsAligned(ReservedBeg, PageSize)); + uptr ReservedEnd = ReservedBeg + ReservedSize; // The beginning of the user area for that allocation comes after the // initial guard page, and both headers. This is the pointer that has to // abide by alignment requirements. - uptr UserBeg = MapBeg + PageSize + HeadersSize; + uptr CommittedBeg = ReservedBeg + PageSize; + uptr UserBeg = CommittedBeg + HeadersSize; uptr UserEnd = UserBeg + UserSize; + uptr CommittedEnd = RoundUpTo(UserEnd, PageSize); // In the rare event of larger alignments, we will attempt to fit the mmap // area better and unmap extraneous memory. This will also ensure that the // offset and unused bytes field of the header stay small. 
- if (Alignment > MinAlignment) { + if (UNLIKELY(Alignment > MinAlignment)) { if (!IsAligned(UserBeg, Alignment)) { UserBeg = RoundUpTo(UserBeg, Alignment); - CHECK_GE(UserBeg, MapBeg); - uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - - PageSize; - CHECK_GE(NewMapBeg, MapBeg); - if (NewMapBeg != MapBeg) { - AddressRange.Unmap(MapBeg, NewMapBeg - MapBeg); - MapBeg = NewMapBeg; + CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize); + const uptr NewReservedBeg = CommittedBeg - PageSize; + DCHECK_GE(NewReservedBeg, ReservedBeg); + if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) { + AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg); + ReservedBeg = NewReservedBeg; } UserEnd = UserBeg + UserSize; + CommittedEnd = RoundUpTo(UserEnd, PageSize); } - uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize; - if (NewMapEnd != MapEnd) { - AddressRange.Unmap(NewMapEnd, MapEnd - NewMapEnd); - MapEnd = NewMapEnd; + const uptr NewReservedEnd = CommittedEnd + PageSize; + DCHECK_LE(NewReservedEnd, ReservedEnd); + if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) { + AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd); + ReservedEnd = NewReservedEnd; } - MapSize = MapEnd - MapBeg; } - CHECK_LE(UserEnd, MapEnd - PageSize); - // Actually mmap the memory, preserving the guard pages on either side - CHECK_EQ(MapBeg + PageSize, - AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize)); - const uptr Ptr = UserBeg - AlignedChunkHeaderSize; - ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr); - *StoredRange = AddressRange; + DCHECK_LE(UserEnd, CommittedEnd); + const uptr CommittedSize = CommittedEnd - CommittedBeg; + // Actually mmap the memory, preserving the guard pages on either sides. 
+ CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize)); + const uptr Ptr = UserBeg - Chunk::getHeaderSize(); + LargeChunk::Header *H = LargeChunk::getHeader(Ptr); + H->StoredRange = AddressRange; + H->Size = CommittedEnd - Ptr; + H->CommittedSize = CommittedSize; // The primary adds the whole class size to the stats when allocating a // chunk, so we will do something similar here. But we will not account for // the guard pages. { SpinMutexLock l(&StatsMutex); - Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize); - Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize); + Stats->Add(AllocatorStatAllocated, CommittedSize); + Stats->Add(AllocatorStatMapped, CommittedSize); + AllocatedBytes += CommittedSize; + if (LargestSize < CommittedSize) + LargestSize = CommittedSize; + NumberOfAllocs++; } return reinterpret_cast<void *>(Ptr); } void Deallocate(AllocatorStats *Stats, void *Ptr) { + LargeChunk::Header *H = LargeChunk::getHeader(Ptr); // Since we're unmapping the entirety of where the ReservedAddressRange // actually is, copy onto the stack. - const uptr PageSize = PageSizeCached; - ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr); + ReservedAddressRange AddressRange = H->StoredRange; + const uptr Size = H->CommittedSize; { SpinMutexLock l(&StatsMutex); - Stats->Sub(AllocatorStatAllocated, AddressRange.size() - 2 * PageSize); - Stats->Sub(AllocatorStatMapped, AddressRange.size() - 2 * PageSize); + Stats->Sub(AllocatorStatAllocated, Size); + Stats->Sub(AllocatorStatMapped, Size); + FreedBytes += Size; + NumberOfFrees++; } AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()), AddressRange.size()); } - uptr GetActuallyAllocatedSize(void *Ptr) { - ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr); - // Deduct PageSize as ReservedAddressRange size includes the trailing guard - // page. 
- uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) + - StoredRange->size() - PageSizeCached; - return MapEnd - reinterpret_cast<uptr>(Ptr); + static uptr GetActuallyAllocatedSize(void *Ptr) { + return LargeChunk::getHeader(Ptr)->Size; } - private: - ReservedAddressRange *getReservedAddressRange(uptr Ptr) { - return reinterpret_cast<ReservedAddressRange*>( - Ptr - sizeof(ReservedAddressRange)); - } - ReservedAddressRange *getReservedAddressRange(const void *Ptr) { - return getReservedAddressRange(reinterpret_cast<uptr>(Ptr)); + void PrintStats() { + Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), " + "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n", + NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, + FreedBytes >> 10, NumberOfAllocs - NumberOfFrees, + (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20); } - static constexpr uptr AlignedReservedAddressRangeSize = - (sizeof(ReservedAddressRange) + MinAlignment - 1) & ~(MinAlignment - 1); + private: static constexpr uptr HeadersSize = - AlignedReservedAddressRangeSize + AlignedChunkHeaderSize; + LargeChunk::getHeaderSize() + Chunk::getHeaderSize(); - uptr PageSizeCached; - SpinMutex StatsMutex; + StaticSpinMutex StatsMutex; + u32 NumberOfAllocs; + u32 NumberOfFrees; + uptr AllocatedBytes; + uptr FreedBytes; + uptr LargestSize; }; #endif // SCUDO_ALLOCATOR_SECONDARY_H_ diff --git a/lib/scudo/scudo_errors.cpp b/lib/scudo/scudo_errors.cpp new file mode 100644 index 000000000000..d11e03cf9163 --- /dev/null +++ b/lib/scudo/scudo_errors.cpp @@ -0,0 +1,77 @@ +//===-- scudo_errors.cpp ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Verbose termination functions. 
+/// +//===----------------------------------------------------------------------===// + +#include "scudo_utils.h" + +#include "sanitizer_common/sanitizer_flags.h" + +namespace __scudo { + +void NORETURN reportCallocOverflow(uptr Count, uptr Size) { + dieWithMessage("calloc parameters overflow: count * size (%zd * %zd) cannot " + "be represented with type size_t\n", Count, Size); +} + +void NORETURN reportPvallocOverflow(uptr Size) { + dieWithMessage("pvalloc parameters overflow: size 0x%zx rounded up to system " + "page size 0x%zx cannot be represented in type size_t\n", Size, + GetPageSizeCached()); +} + +void NORETURN reportAllocationAlignmentTooBig(uptr Alignment, + uptr MaxAlignment) { + dieWithMessage("invalid allocation alignment: %zd exceeds maximum supported " + "allocation of %zd\n", Alignment, MaxAlignment); +} + +void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment) { + dieWithMessage("invalid allocation alignment: %zd, alignment must be a power " + "of two\n", Alignment); +} + +void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) { + dieWithMessage("invalid alignment requested in posix_memalign: %zd, alignment" + " must be a power of two and a multiple of sizeof(void *) == %zd\n", + Alignment, sizeof(void *)); // NOLINT +} + +void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment) { +#if SANITIZER_POSIX + dieWithMessage("invalid alignment requested in aligned_alloc: %zd, alignment " + "must be a power of two and the requested size 0x%zx must be a multiple " + "of alignment\n", Alignment, Size); +#else + dieWithMessage("invalid alignment requested in aligned_alloc: %zd, the " + "requested size 0x%zx must be a multiple of alignment\n", Alignment, + Size); +#endif +} + +void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize, + uptr MaxSize) { + dieWithMessage("requested allocation size 0x%zx (0x%zx after adjustments) " + "exceeds maximum supported size of 0x%zx\n", UserSize, TotalSize, + 
MaxSize); +} + +void NORETURN reportRssLimitExceeded() { + dieWithMessage("specified RSS limit exceeded, currently set to " + "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb); +} + +void NORETURN reportOutOfMemory(uptr RequestedSize) { + dieWithMessage("allocator is out of memory trying to allocate 0x%zx bytes\n", + RequestedSize); +} + +} // namespace __scudo diff --git a/lib/scudo/scudo_errors.h b/lib/scudo/scudo_errors.h new file mode 100644 index 000000000000..8b1af996be04 --- /dev/null +++ b/lib/scudo/scudo_errors.h @@ -0,0 +1,35 @@ +//===-- scudo_errors.h ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Header for scudo_errors.cpp. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_ERRORS_H_ +#define SCUDO_ERRORS_H_ + +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __scudo { + +void NORETURN reportCallocOverflow(uptr Count, uptr Size); +void NORETURN reportPvallocOverflow(uptr Size); +void NORETURN reportAllocationAlignmentTooBig(uptr Alignment, + uptr MaxAlignment); +void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment); +void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment); +void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment); +void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize, + uptr MaxSize); +void NORETURN reportRssLimitExceeded(); +void NORETURN reportOutOfMemory(uptr RequestedSize); + +} // namespace __scudo + +#endif // SCUDO_ERRORS_H_ diff --git a/lib/scudo/scudo_flags.cpp b/lib/scudo/scudo_flags.cpp index 2aff3ef1e8fa..c012471a8368 100644 --- a/lib/scudo/scudo_flags.cpp +++ b/lib/scudo/scudo_flags.cpp @@ -12,13 +12,12 @@ 
//===----------------------------------------------------------------------===// #include "scudo_flags.h" +#include "scudo_interface_internal.h" #include "scudo_utils.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_flag_parser.h" -SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void); - namespace __scudo { static Flags ScudoFlags; // Use via getFlags(). @@ -36,6 +35,14 @@ static void RegisterScudoFlags(FlagParser *parser, Flags *f) { #undef SCUDO_FLAG } +static const char *getCompileDefinitionScudoDefaultOptions() { +#ifdef SCUDO_DEFAULT_OPTIONS + return SANITIZER_STRINGIFY(SCUDO_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + static const char *getScudoDefaultOptions() { return (&__scudo_default_options) ? __scudo_default_options() : ""; } @@ -55,6 +62,9 @@ void initFlags() { RegisterScudoFlags(&ScudoParser, f); RegisterCommonFlags(&ScudoParser); + // Override from compile definition. + ScudoParser.ParseString(getCompileDefinitionScudoDefaultOptions()); + // Override from user-specified string. ScudoParser.ParseString(getScudoDefaultOptions()); @@ -119,3 +129,9 @@ Flags *getFlags() { } } // namespace __scudo + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void) { + return ""; +} +#endif diff --git a/lib/scudo/scudo_interceptors.cpp b/lib/scudo/scudo_interceptors.cpp deleted file mode 100644 index 735a13196757..000000000000 --- a/lib/scudo/scudo_interceptors.cpp +++ /dev/null @@ -1,75 +0,0 @@ -//===-- scudo_interceptors.cpp ----------------------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -/// -/// Linux specific malloc interception functions. 
-/// -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_LINUX - -#include "scudo_allocator.h" - -#include "interception/interception.h" - -using namespace __scudo; - -INTERCEPTOR(void, free, void *ptr) { - scudoFree(ptr, FromMalloc); -} - -INTERCEPTOR(void, cfree, void *ptr) { - scudoFree(ptr, FromMalloc); -} - -INTERCEPTOR(void*, malloc, uptr size) { - return scudoMalloc(size, FromMalloc); -} - -INTERCEPTOR(void*, realloc, void *ptr, uptr size) { - return scudoRealloc(ptr, size); -} - -INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { - return scudoCalloc(nmemb, size); -} - -INTERCEPTOR(void*, valloc, uptr size) { - return scudoValloc(size); -} - -INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { - return scudoMemalign(alignment, size); -} - -INTERCEPTOR(void*, __libc_memalign, uptr alignment, uptr size) { - return scudoMemalign(alignment, size); -} - -INTERCEPTOR(void*, pvalloc, uptr size) { - return scudoPvalloc(size); -} - -INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) { - return scudoAlignedAlloc(alignment, size); -} - -INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { - return scudoPosixMemalign(memptr, alignment, size); -} - -INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { - return scudoMallocUsableSize(ptr); -} - -INTERCEPTOR(int, mallopt, int cmd, int value) { - return -1; -} - -#endif // SANITIZER_LINUX diff --git a/lib/scudo/scudo_interface_internal.h b/lib/scudo/scudo_interface_internal.h index 3f39e0c4ee0b..3e520a50c83b 100644 --- a/lib/scudo/scudo_interface_internal.h +++ b/lib/scudo/scudo_interface_internal.h @@ -14,9 +14,20 @@ #ifndef SCUDO_INTERFACE_INTERNAL_H_ #define SCUDO_INTERFACE_INTERNAL_H_ +#include "sanitizer_common/sanitizer_internal_defs.h" + +using __sanitizer::uptr; +using __sanitizer::s32; + extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char* 
__scudo_default_options(); + +SANITIZER_INTERFACE_ATTRIBUTE +void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit); + SANITIZER_INTERFACE_ATTRIBUTE -void __scudo_set_rss_limit(unsigned long LimitMb, int HardLimit); // NOLINT +void __scudo_print_stats(); } // extern "C" #endif // SCUDO_INTERFACE_INTERNAL_H_ diff --git a/lib/scudo/scudo_malloc.cpp b/lib/scudo/scudo_malloc.cpp new file mode 100644 index 000000000000..91a77b365823 --- /dev/null +++ b/lib/scudo/scudo_malloc.cpp @@ -0,0 +1,85 @@ +//===-- scudo_malloc.cpp ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Interceptors for malloc related functions. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_allocator.h" + +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_platform_interceptors.h" + +#include <stddef.h> + +using namespace __scudo; + +extern "C" { +INTERCEPTOR_ATTRIBUTE void free(void *ptr) { + scudoDeallocate(ptr, 0, 0, FromMalloc); +} + +INTERCEPTOR_ATTRIBUTE void *malloc(size_t size) { + return scudoAllocate(size, 0, FromMalloc); +} + +INTERCEPTOR_ATTRIBUTE void *realloc(void *ptr, size_t size) { + return scudoRealloc(ptr, size); +} + +INTERCEPTOR_ATTRIBUTE void *calloc(size_t nmemb, size_t size) { + return scudoCalloc(nmemb, size); +} + +INTERCEPTOR_ATTRIBUTE void *valloc(size_t size) { + return scudoValloc(size); +} + +INTERCEPTOR_ATTRIBUTE +int posix_memalign(void **memptr, size_t alignment, size_t size) { + return scudoPosixMemalign(memptr, alignment, size); +} + +#if SANITIZER_INTERCEPT_CFREE +INTERCEPTOR_ATTRIBUTE void cfree(void *ptr) ALIAS("free"); +#endif + +#if SANITIZER_INTERCEPT_MEMALIGN +INTERCEPTOR_ATTRIBUTE void *memalign(size_t alignment, 
size_t size) { + return scudoAllocate(size, alignment, FromMemalign); +} + +INTERCEPTOR_ATTRIBUTE +void *__libc_memalign(size_t alignment, size_t size) ALIAS("memalign"); +#endif + +#if SANITIZER_INTERCEPT_PVALLOC +INTERCEPTOR_ATTRIBUTE void *pvalloc(size_t size) { + return scudoPvalloc(size); +} +#endif + +#if SANITIZER_INTERCEPT_ALIGNED_ALLOC +INTERCEPTOR_ATTRIBUTE void *aligned_alloc(size_t alignment, size_t size) { + return scudoAlignedAlloc(alignment, size); +} +#endif + +#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE +INTERCEPTOR_ATTRIBUTE size_t malloc_usable_size(void *ptr) { + return scudoMallocUsableSize(ptr); +} +#endif + +#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO +INTERCEPTOR_ATTRIBUTE int mallopt(int cmd, int value) { + return -1; +} +#endif +} // extern "C" diff --git a/lib/scudo/scudo_new_delete.cpp b/lib/scudo/scudo_new_delete.cpp index c5a1abbed82b..daa3b47dc727 100644 --- a/lib/scudo/scudo_new_delete.cpp +++ b/lib/scudo/scudo_new_delete.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "scudo_allocator.h" +#include "scudo_errors.h" #include "interception/interception.h" @@ -24,51 +25,84 @@ using namespace __scudo; // Fake std::nothrow_t to avoid including <new>. namespace std { struct nothrow_t {}; +enum class align_val_t: size_t {}; } // namespace std // TODO(alekseys): throw std::bad_alloc instead of dying on OOM. 
+#define OPERATOR_NEW_BODY_ALIGN(Type, Align, NoThrow) \ + void *Ptr = scudoAllocate(size, static_cast<uptr>(Align), Type); \ + if (!NoThrow && UNLIKELY(!Ptr)) reportOutOfMemory(size); \ + return Ptr; +#define OPERATOR_NEW_BODY(Type, NoThrow) \ + OPERATOR_NEW_BODY_ALIGN(Type, 0, NoThrow) + +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size) +{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/false); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size) +{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/false); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/true); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) +{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/true); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/false); } +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::align_val_t align) +{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/false); } +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/true); } CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size) { - void *res = scudoMalloc(size, FromNew); - if (UNLIKELY(!res)) DieOnFailure::OnOOM(); - return res; -} -CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size) { - void *res = scudoMalloc(size, FromNewArray); - if (UNLIKELY(!res)) DieOnFailure::OnOOM(); - return res; -} -CXX_OPERATOR_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) { - return scudoMalloc(size, FromNew); -} -CXX_OPERATOR_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) { - return scudoMalloc(size, FromNewArray); -} +void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/true); } +#define 
OPERATOR_DELETE_BODY(Type) \ + scudoDeallocate(ptr, 0, 0, Type); +#define OPERATOR_DELETE_BODY_SIZE(Type) \ + scudoDeallocate(ptr, size, 0, Type); +#define OPERATOR_DELETE_BODY_ALIGN(Type) \ + scudoDeallocate(ptr, 0, static_cast<uptr>(align), Type); +#define OPERATOR_DELETE_BODY_SIZE_ALIGN(Type) \ + scudoDeallocate(ptr, size, static_cast<uptr>(align), Type); + +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT +{ OPERATOR_DELETE_BODY(FromNew); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT +{ OPERATOR_DELETE_BODY(FromNewArray); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FromNew); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY(FromNewArray); } +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE(FromNew); } +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE(FromNewArray); } CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr) NOEXCEPT { - return scudoFree(ptr, FromNew); -} +void operator delete(void *ptr, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_ALIGN(FromNew); } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr) NOEXCEPT { - return scudoFree(ptr, FromNewArray); -} +void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); } CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, std::nothrow_t const&) NOEXCEPT { - return scudoFree(ptr, FromNew); -} +void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) +{ OPERATOR_DELETE_BODY_ALIGN(FromNew); } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, std::nothrow_t const&) NOEXCEPT { - return scudoFree(ptr, FromNewArray); -} +void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&) +{ 
OPERATOR_DELETE_BODY_ALIGN(FromNewArray); } CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, size_t size) NOEXCEPT { - scudoSizedFree(ptr, size, FromNew); -} +void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNew); } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size) NOEXCEPT { - scudoSizedFree(ptr, size, FromNewArray); -} +void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT +{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNewArray); } diff --git a/lib/scudo/scudo_platform.h b/lib/scudo/scudo_platform.h index e1c9c32e9a62..3a6f4be69dbd 100644 --- a/lib/scudo/scudo_platform.h +++ b/lib/scudo/scudo_platform.h @@ -43,7 +43,11 @@ // Maximum number of TSDs that can be created for the Shared model. #ifndef SCUDO_SHARED_TSD_POOL_SIZE -# define SCUDO_SHARED_TSD_POOL_SIZE 32U +# if SANITIZER_ANDROID +# define SCUDO_SHARED_TSD_POOL_SIZE 2U +# else +# define SCUDO_SHARED_TSD_POOL_SIZE 32U +# endif // SANITIZER_ANDROID #endif // SCUDO_SHARED_TSD_POOL_SIZE // The following allows the public interface functions to be disabled. @@ -51,6 +55,16 @@ # define SCUDO_CAN_USE_PUBLIC_INTERFACE 1 #endif +// Hooks in the allocation & deallocation paths can become a security concern if +// implemented improperly, or if overwritten by an attacker. Use with caution. 
+#ifndef SCUDO_CAN_USE_HOOKS +# if SANITIZER_FUCHSIA +# define SCUDO_CAN_USE_HOOKS 1 +# else +# define SCUDO_CAN_USE_HOOKS 0 +# endif // SANITIZER_FUCHSIA +#endif // SCUDO_CAN_USE_HOOKS + namespace __scudo { #if SANITIZER_CAN_USE_ALLOCATOR64 diff --git a/lib/scudo/scudo_termination.cpp b/lib/scudo/scudo_termination.cpp index c441ff3c126a..4237d3bc1865 100644 --- a/lib/scudo/scudo_termination.cpp +++ b/lib/scudo/scudo_termination.cpp @@ -35,7 +35,7 @@ void SetCheckFailedCallback(CheckFailedCallbackType callback) {} void NORETURN CheckFailed(const char *File, int Line, const char *Condition, u64 Value1, u64 Value2) { - __scudo::dieWithMessage("Scudo CHECK failed: %s:%d %s (%lld, %lld)\n", + __scudo::dieWithMessage("CHECK failed at %s:%d %s (%lld, %lld)\n", File, Line, Condition, Value1, Value2); } diff --git a/lib/scudo/scudo_tsd.h b/lib/scudo/scudo_tsd.h index 80464b5ea1e4..2bd78716af69 100644 --- a/lib/scudo/scudo_tsd.h +++ b/lib/scudo/scudo_tsd.h @@ -23,11 +23,11 @@ namespace __scudo { -struct ALIGNED(64) ScudoTSD { - AllocatorCache Cache; +struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD { + AllocatorCacheT Cache; uptr QuarantineCachePlaceHolder[4]; - void init(bool Shared); + void init(); void commitBack(); INLINE bool tryLock() { @@ -36,29 +36,23 @@ struct ALIGNED(64) ScudoTSD { return true; } if (atomic_load_relaxed(&Precedence) == 0) - atomic_store_relaxed(&Precedence, MonotonicNanoTime()); + atomic_store_relaxed(&Precedence, static_cast<uptr>( + MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0))); return false; } INLINE void lock() { - Mutex.Lock(); atomic_store_relaxed(&Precedence, 0); + Mutex.Lock(); } - INLINE void unlock() { - if (!UnlockRequired) - return; - Mutex.Unlock(); - } + INLINE void unlock() { Mutex.Unlock(); } - INLINE u64 getPrecedence() { - return atomic_load_relaxed(&Precedence); - } + INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); } private: - bool UnlockRequired; StaticSpinMutex Mutex; - atomic_uint64_t 
Precedence; + atomic_uintptr_t Precedence; }; void initThread(bool MinimalInit); diff --git a/lib/scudo/scudo_tsd_exclusive.cpp b/lib/scudo/scudo_tsd_exclusive.cpp index 1084dfac91e1..74e797580be7 100644 --- a/lib/scudo/scudo_tsd_exclusive.cpp +++ b/lib/scudo/scudo_tsd_exclusive.cpp @@ -50,7 +50,7 @@ static void teardownThread(void *Ptr) { static void initOnce() { CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0); initScudo(); - FallbackTSD.init(/*Shared=*/true); + FallbackTSD.init(); } void initThread(bool MinimalInit) { @@ -59,7 +59,7 @@ void initThread(bool MinimalInit) { return; CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>( GetPthreadDestructorIterations())), 0); - TSD.init(/*Shared=*/false); + TSD.init(); ScudoThreadState = ThreadInitialized; } diff --git a/lib/scudo/scudo_tsd_exclusive.inc b/lib/scudo/scudo_tsd_exclusive.inc index 567b6a1edd12..1fa9dcdfd20d 100644 --- a/lib/scudo/scudo_tsd_exclusive.inc +++ b/lib/scudo/scudo_tsd_exclusive.inc @@ -35,11 +35,13 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) { initThread(MinimalInit); } -ALWAYS_INLINE ScudoTSD *getTSDAndLock() { +ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) { if (UNLIKELY(ScudoThreadState != ThreadInitialized)) { FallbackTSD.lock(); + *UnlockRequired = true; return &FallbackTSD; } + *UnlockRequired = false; return &TSD; } diff --git a/lib/scudo/scudo_tsd_shared.cpp b/lib/scudo/scudo_tsd_shared.cpp index 3e13e5d3a109..8853894c00a2 100644 --- a/lib/scudo/scudo_tsd_shared.cpp +++ b/lib/scudo/scudo_tsd_shared.cpp @@ -23,6 +23,13 @@ pthread_key_t PThreadKey; static atomic_uint32_t CurrentIndex; static ScudoTSD *TSDs; static u32 NumberOfTSDs; +static u32 CoPrimes[SCUDO_SHARED_TSD_POOL_SIZE]; +static u32 NumberOfCoPrimes = 0; + +#if SANITIZER_LINUX && !SANITIZER_ANDROID +__attribute__((tls_model("initial-exec"))) +THREADLOCAL ScudoTSD *CurrentTSD; +#endif static void initOnce() { CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0); @@ 
-31,13 +38,21 @@ static void initOnce() { static_cast<u32>(SCUDO_SHARED_TSD_POOL_SIZE)); TSDs = reinterpret_cast<ScudoTSD *>( MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs")); - for (u32 i = 0; i < NumberOfTSDs; i++) - TSDs[i].init(/*Shared=*/true); + for (u32 I = 0; I < NumberOfTSDs; I++) { + TSDs[I].init(); + u32 A = I + 1; + u32 B = NumberOfTSDs; + while (B != 0) { const u32 T = A; A = B; B = T % B; } + if (A == 1) + CoPrimes[NumberOfCoPrimes++] = I + 1; + } } ALWAYS_INLINE void setCurrentTSD(ScudoTSD *TSD) { #if SANITIZER_ANDROID *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD); +#elif SANITIZER_LINUX + CurrentTSD = TSD; #else CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(TSD)), 0); #endif // SANITIZER_ANDROID @@ -50,34 +65,42 @@ void initThread(bool MinimalInit) { setCurrentTSD(&TSDs[Index % NumberOfTSDs]); } -ScudoTSD *getTSDAndLockSlow() { - ScudoTSD *TSD; +ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) { if (NumberOfTSDs > 1) { - // Go through all the contexts and find the first unlocked one. - for (u32 i = 0; i < NumberOfTSDs; i++) { - TSD = &TSDs[i]; - if (TSD->tryLock()) { - setCurrentTSD(TSD); - return TSD; + // Use the Precedence of the current TSD as our random seed. Since we are in + // the slow path, it means that tryLock failed, and as a result it's very + // likely that said Precedence is non-zero. + u32 RandState = static_cast<u32>(TSD->getPrecedence()); + const u32 R = Rand(&RandState); + const u32 Inc = CoPrimes[R % NumberOfCoPrimes]; + u32 Index = R % NumberOfTSDs; + uptr LowestPrecedence = UINTPTR_MAX; + ScudoTSD *CandidateTSD = nullptr; + // Go randomly through at most 4 contexts and find a candidate. + for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) { + if (TSDs[Index].tryLock()) { + setCurrentTSD(&TSDs[Index]); + return &TSDs[Index]; } - } - // No luck, find the one with the lowest Precedence, and slow lock it. 
- u64 LowestPrecedence = UINT64_MAX; - for (u32 i = 0; i < NumberOfTSDs; i++) { - u64 Precedence = TSDs[i].getPrecedence(); - if (Precedence && Precedence < LowestPrecedence) { - TSD = &TSDs[i]; + const uptr Precedence = TSDs[Index].getPrecedence(); + // A 0 precedence here means another thread just locked this TSD. + if (UNLIKELY(Precedence == 0)) + continue; + if (Precedence < LowestPrecedence) { + CandidateTSD = &TSDs[Index]; LowestPrecedence = Precedence; } + Index += Inc; + if (Index >= NumberOfTSDs) + Index -= NumberOfTSDs; } - if (LIKELY(LowestPrecedence != UINT64_MAX)) { - TSD->lock(); - setCurrentTSD(TSD); - return TSD; + if (CandidateTSD) { + CandidateTSD->lock(); + setCurrentTSD(CandidateTSD); + return CandidateTSD; } } // Last resort, stick with the current one. - TSD = getCurrentTSD(); TSD->lock(); return TSD; } diff --git a/lib/scudo/scudo_tsd_shared.inc b/lib/scudo/scudo_tsd_shared.inc index 79fcd651ed2d..9dad756b5386 100644 --- a/lib/scudo/scudo_tsd_shared.inc +++ b/lib/scudo/scudo_tsd_shared.inc @@ -19,9 +19,16 @@ extern pthread_key_t PThreadKey; +#if SANITIZER_LINUX && !SANITIZER_ANDROID +__attribute__((tls_model("initial-exec"))) +extern THREADLOCAL ScudoTSD *CurrentTSD; +#endif + ALWAYS_INLINE ScudoTSD* getCurrentTSD() { #if SANITIZER_ANDROID return reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr()); +#elif SANITIZER_LINUX + return CurrentTSD; #else return reinterpret_cast<ScudoTSD *>(pthread_getspecific(PThreadKey)); #endif // SANITIZER_ANDROID @@ -33,16 +40,17 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) { initThread(MinimalInit); } -ScudoTSD *getTSDAndLockSlow(); +ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD); -ALWAYS_INLINE ScudoTSD *getTSDAndLock() { +ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) { ScudoTSD *TSD = getCurrentTSD(); - CHECK(TSD && "No TSD associated with the current thread!"); + DCHECK(TSD && "No TSD associated with the current thread!"); + *UnlockRequired = true; // Try to lock the currently 
associated context. if (TSD->tryLock()) return TSD; // If it failed, go the slow path. - return getTSDAndLockSlow(); + return getTSDAndLockSlow(TSD); } #endif // !SCUDO_TSD_EXCLUSIVE diff --git a/lib/scudo/scudo_utils.cpp b/lib/scudo/scudo_utils.cpp index 2f936bf9e780..d5788d20ca46 100644 --- a/lib/scudo/scudo_utils.cpp +++ b/lib/scudo/scudo_utils.cpp @@ -17,7 +17,10 @@ # include <cpuid.h> #elif defined(__arm__) || defined(__aarch64__) # include "sanitizer_common/sanitizer_getauxval.h" -# if SANITIZER_POSIX +# if SANITIZER_FUCHSIA +# include <zircon/syscalls.h> +# include <zircon/features.h> +# elif SANITIZER_POSIX # include "sanitizer_common/sanitizer_posix.h" # include <fcntl.h> # endif @@ -38,12 +41,18 @@ extern int VSNPrintf(char *buff, int buff_length, const char *format, namespace __scudo { FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) { + static const char ScudoError[] = "Scudo ERROR: "; + static constexpr uptr PrefixSize = sizeof(ScudoError) - 1; // Our messages are tiny, 256 characters is more than enough. 
char Message[256]; va_list Args; va_start(Args, Format); - VSNPrintf(Message, sizeof(Message), Format, Args); + internal_memcpy(Message, ScudoError, PrefixSize); + VSNPrintf(Message + PrefixSize, sizeof(Message) - PrefixSize, Format, Args); va_end(Args); + LogMessageOnPrintf(Message); + if (common_flags()->abort_on_error) + SetAbortMessage(Message); RawWrite(Message); Die(); } @@ -107,9 +116,17 @@ INLINE bool areBionicGlobalsInitialized() { } bool hasHardwareCRC32() { +#if SANITIZER_FUCHSIA + u32 HWCap; + zx_status_t Status = zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap); + if (Status != ZX_OK || (HWCap & ZX_ARM64_FEATURE_ISA_CRC32) == 0) + return false; + return true; +#else if (&getauxval && areBionicGlobalsInitialized()) return !!(getauxval(AT_HWCAP) & HWCAP_CRC32); return hasHardwareCRC32ARMPosix(); +#endif // SANITIZER_FUCHSIA } #else bool hasHardwareCRC32() { return false; } diff --git a/lib/stats/CMakeLists.txt b/lib/stats/CMakeLists.txt index 6be36a7cb568..23c80843195d 100644 --- a/lib/stats/CMakeLists.txt +++ b/lib/stats/CMakeLists.txt @@ -1,3 +1,6 @@ +set(STATS_HEADERS + stats.h) + include_directories(..) 
add_custom_target(stats) @@ -22,8 +25,10 @@ add_compiler_rt_runtime(clang_rt.stats ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} OS ${SANITIZER_COMMON_SUPPORTED_OS} SOURCES stats.cc + ADDITIONAL_HEADERS ${STATS_HEADERS} OBJECT_LIBS RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonSymbolizer CFLAGS ${SANITIZER_COMMON_CFLAGS} LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} LINK_LIBS ${STATS_LINK_LIBS} @@ -34,6 +39,7 @@ add_compiler_rt_runtime(clang_rt.stats_client ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH} OS ${SANITIZER_COMMON_SUPPORTED_OS} SOURCES stats_client.cc + ADDITIONAL_HEADERS ${STATS_HEADERS} CFLAGS ${SANITIZER_COMMON_CFLAGS} LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS} PARENT_TARGET stats) diff --git a/lib/stats/stats.cc b/lib/stats/stats.cc index 6a6eb3a3cdbc..c7a9a38a612a 100644 --- a/lib/stats/stats.cc +++ b/lib/stats/stats.cc @@ -42,7 +42,7 @@ void WriteLE(fd_t fd, uptr val) { } void OpenStatsFile(const char *path_env) { - InternalScopedBuffer<char> path(kMaxPathLength); + InternalMmapVector<char> path(kMaxPathLength); SubstituteForFlagValue(path_env, path.data(), kMaxPathLength); error_t err; diff --git a/lib/tsan/.clang-format b/lib/tsan/.clang-format index f6cb8ad931f5..560308c91dee 100644 --- a/lib/tsan/.clang-format +++ b/lib/tsan/.clang-format @@ -1 +1,2 @@ BasedOnStyle: Google +AllowShortIfStatementsOnASingleLine: false diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt index 3697ecb21381..4a2ea3f4ac75 100644 --- a/lib/tsan/CMakeLists.txt +++ b/lib/tsan/CMakeLists.txt @@ -5,7 +5,10 @@ include_directories(..) set(TSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS}) # SANITIZER_COMMON_CFLAGS contains -fPIC, but it's performance-critical for # TSan runtime to be built with -fPIE to reduce the number of register spills. -append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE TSAN_CFLAGS) +# On FreeBSD however it provokes linkage issue thus we disable it. 
+if(NOT CMAKE_SYSTEM MATCHES "FreeBSD") + append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE TSAN_CFLAGS) +endif() append_rtti_flag(OFF TSAN_CFLAGS) if(COMPILER_RT_TSAN_DEBUG_OUTPUT) @@ -77,14 +80,15 @@ set(TSAN_HEADERS rtl/tsan_flags.inc rtl/tsan_ignoreset.h rtl/tsan_interceptors.h - rtl/tsan_interface_ann.h rtl/tsan_interface.h + rtl/tsan_interface_ann.h rtl/tsan_interface_inl.h rtl/tsan_interface_java.h rtl/tsan_mman.h rtl/tsan_mutex.h rtl/tsan_mutexset.h rtl/tsan_platform.h + rtl/tsan_ppc_regs.h rtl/tsan_report.h rtl/tsan_rtl.h rtl/tsan_stack_trace.h @@ -111,18 +115,22 @@ if(APPLE) OS ${TSAN_SUPPORTED_OS} ARCHS ${TSAN_SUPPORTED_ARCH} SOURCES ${TSAN_SOURCES} ${TSAN_CXX_SOURCES} ${TSAN_ASM_SOURCES} + ADDITIONAL_HEADERS ${TSAN_HEADERS} OBJECT_LIBS RTInterception RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer RTUbsan CFLAGS ${TSAN_RTL_CFLAGS} LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} - LINK_LIBS ${TSAN_LINK_LIBS} + LINK_LIBS ${TSAN_LINK_LIBS} objc PARENT_TARGET tsan) add_compiler_rt_object_libraries(RTTsan_dynamic OS ${TSAN_SUPPORTED_OS} ARCHS ${TSAN_SUPPORTED_ARCH} SOURCES ${TSAN_SOURCES} ${TSAN_CXX_SOURCES} ${TSAN_ASM_SOURCES} + ADDITIONAL_HEADERS ${TSAN_HEADERS} CFLAGS ${TSAN_RTL_CFLAGS}) # Build and check Go runtime. @@ -134,6 +142,7 @@ if(APPLE) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/go COMMENT "Checking TSan Go runtime..." VERBATIM) + set_target_properties(GotsanRuntimeCheck PROPERTIES FOLDER "Compiler-RT Misc") else() foreach(arch ${TSAN_SUPPORTED_ARCH}) if(arch STREQUAL "x86_64") @@ -151,6 +160,15 @@ else() add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_aarch64.S) elseif(arch MATCHES "powerpc64|powerpc64le") add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_ppc64.S) + # Sanity check for Go runtime. 
+ set(BUILDGO_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/go/buildgo.sh) + add_custom_target(GotsanRuntimeCheck + COMMAND env "CC=${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_ARG1}" + IN_TMPDIR=1 SILENT=1 ${BUILDGO_SCRIPT} + DEPENDS clang_rt.tsan-${arch} ${BUILDGO_SCRIPT} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/go + COMMENT "Checking TSan Go runtime..." + VERBATIM) elseif(arch MATCHES "mips64|mips64le") add_asm_sources(TSAN_ASM_SOURCES rtl/tsan_rtl_mips64.S) else() @@ -163,7 +181,10 @@ else() $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonCoverage.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}> $<TARGET_OBJECTS:RTUbsan.${arch}> + ADDITIONAL_HEADERS ${TSAN_HEADERS} CFLAGS ${TSAN_RTL_CFLAGS} PARENT_TARGET tsan) add_compiler_rt_runtime(clang_rt.tsan_cxx @@ -171,6 +192,7 @@ else() ARCHS ${arch} SOURCES ${TSAN_CXX_SOURCES} $<TARGET_OBJECTS:RTUbsan_cxx.${arch}> + ADDITIONAL_HEADERS ${TSAN_HEADERS} CFLAGS ${TSAN_RTL_CFLAGS} PARENT_TARGET tsan) list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-${arch} @@ -212,11 +234,13 @@ if(COMPILER_RT_LIBCXX_PATH AND set(LIBCXX_PREFIX ${CMAKE_CURRENT_BINARY_DIR}/libcxx_tsan_${arch}) add_custom_libcxx(libcxx_tsan_${arch} ${LIBCXX_PREFIX} DEPS ${TSAN_RUNTIME_LIBRARIES} - CFLAGS ${TARGET_CFLAGS} -fsanitize=thread) - list(APPEND libcxx_tsan_deps libcxx_tsan_${arch}) + CFLAGS ${TARGET_CFLAGS} -fsanitize=thread + USE_TOOLCHAIN) + list(APPEND libcxx_tsan_deps libcxx_tsan_${arch}-build) endforeach() add_custom_target(libcxx_tsan DEPENDS ${libcxx_tsan_deps}) + set_target_properties(libcxx_tsan PROPERTIES FOLDER "Compiler-RT Misc") endif() if(COMPILER_RT_INCLUDE_TESTS) diff --git a/lib/tsan/dd/CMakeLists.txt b/lib/tsan/dd/CMakeLists.txt index 07fc300535cd..f2b8a6d173a4 100644 --- a/lib/tsan/dd/CMakeLists.txt +++ b/lib/tsan/dd/CMakeLists.txt @@ -17,6 +17,8 @@ append_list_if(COMPILER_RT_HAS_LIBRT rt DD_LINKLIBS) 
append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread DD_LINKLIBS) add_custom_target(dd) +set_target_properties(dd PROPERTIES FOLDER "Compiler-RT Misc") + # Deadlock detector is currently supported on 64-bit Linux only. if(CAN_TARGET_x86_64 AND UNIX AND NOT APPLE AND NOT ANDROID) set(arch "x86_64") @@ -41,6 +43,7 @@ if(CAN_TARGET_x86_64 AND UNIX AND NOT APPLE AND NOT ANDROID) $<TARGET_OBJECTS:RTInterception.${arch}> $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.${arch}> LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} LINK_LIBS ${DD_LINKLIBS} PARENT_TARGET dd) diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh index 62ff0fc38ba9..7f570ca81139 100755 --- a/lib/tsan/go/buildgo.sh +++ b/lib/tsan/go/buildgo.sh @@ -35,12 +35,12 @@ SRCS=" ../../sanitizer_common/sanitizer_stackdepot.cc ../../sanitizer_common/sanitizer_stacktrace.cc ../../sanitizer_common/sanitizer_symbolizer.cc + ../../sanitizer_common/sanitizer_symbolizer_report.cc ../../sanitizer_common/sanitizer_termination.cc " if [ "`uname -a | grep Linux`" != "" ]; then - SUFFIX="linux_amd64" - OSCFLAGS="-fPIC -ffreestanding -Wno-maybe-uninitialized -Wno-unused-const-variable -Werror -Wno-unknown-warning-option" + OSCFLAGS="-fPIC -Wno-maybe-uninitialized" OSLDFLAGS="-lpthread -fPIC -fpie" SRCS=" $SRCS @@ -52,7 +52,13 @@ if [ "`uname -a | grep Linux`" != "" ]; then ../../sanitizer_common/sanitizer_linux.cc ../../sanitizer_common/sanitizer_linux_libcdep.cc ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc - " + " + if [ "`uname -a | grep ppc64le`" != "" ]; then + SUFFIX="linux_ppc64le" + elif [ "`uname -a | grep x86_64`" != "" ]; then + SUFFIX="linux_amd64" + OSCFLAGS="$OSCFLAGS -ffreestanding -Wno-unused-const-variable -Werror -Wno-unknown-warning-option" + fi elif [ "`uname -a | grep FreeBSD`" != "" ]; then SUFFIX="freebsd_amd64" OSCFLAGS="-fno-strict-aliasing -fPIC -Werror" @@ -62,8 +68,8 @@ elif [ "`uname -a | 
grep FreeBSD`" != "" ]; then ../rtl/tsan_platform_linux.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc + ../../sanitizer_common/sanitizer_procmaps_bsd.cc ../../sanitizer_common/sanitizer_procmaps_common.cc - ../../sanitizer_common/sanitizer_procmaps_freebsd.cc ../../sanitizer_common/sanitizer_linux.cc ../../sanitizer_common/sanitizer_linux_libcdep.cc ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc @@ -77,15 +83,15 @@ elif [ "`uname -a | grep NetBSD`" != "" ]; then ../rtl/tsan_platform_linux.cc ../../sanitizer_common/sanitizer_posix.cc ../../sanitizer_common/sanitizer_posix_libcdep.cc + ../../sanitizer_common/sanitizer_procmaps_bsd.cc ../../sanitizer_common/sanitizer_procmaps_common.cc - ../../sanitizer_common/sanitizer_procmaps_freebsd.cc ../../sanitizer_common/sanitizer_linux.cc ../../sanitizer_common/sanitizer_linux_libcdep.cc ../../sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc " elif [ "`uname -a | grep Darwin`" != "" ]; then SUFFIX="darwin_amd64" - OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option -isysroot $(xcodebuild -version -sdk macosx Path) -mmacosx-version-min=10.7" + OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option -mmacosx-version-min=10.7" OSLDFLAGS="-lpthread -fPIC -fpie -mmacosx-version-min=10.7" SRCS=" $SRCS @@ -132,7 +138,12 @@ done FLAGS=" -I../rtl -I../.. 
-I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS" if [ "$DEBUG" = "" ]; then - FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -msse3 -fomit-frame-pointer" + FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer" + if [ "$SUFFIX" = "linux_ppc64le" ]; then + FLAGS="$FLAGS -mcpu=power8 -fno-function-sections" + elif [ "$SUFFIX" = "linux_amd64" ]; then + FLAGS="$FLAGS -msse3" + fi else FLAGS="$FLAGS -DSANITIZER_DEBUG=1 -g" fi diff --git a/lib/tsan/go/test.c b/lib/tsan/go/test.c index b3e31b1feaa7..c484774caeb9 100644 --- a/lib/tsan/go/test.c +++ b/lib/tsan/go/test.c @@ -11,6 +11,8 @@ // //===----------------------------------------------------------------------===// +#include <sys/mman.h> +#include <errno.h> #include <stdio.h> #include <stdlib.h> @@ -44,7 +46,13 @@ void symbolize_cb(long cmd, void *ctx) { } } -char buf0[100<<10]; +/* + * See lib/tsan/rtl/tsan_platform.h for details of what the memory layout + * of Go programs looks like. To prevent running over existing mappings, + * we pick an address slightly inside the Go heap region. + */ +void *go_heap = (void *)0xC011110000; +char *buf0; void foobar() {} void barfoo() {} @@ -54,6 +62,15 @@ int main(void) { void *proc0 = 0; __tsan_init(&thr0, &proc0, symbolize_cb); current_proc = proc0; + + // Allocate something resembling a heap in Go. 
+ buf0 = mmap(go_heap, 16384, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_FIXED | MAP_ANON, -1, 0); + if (buf0 == MAP_FAILED) { + fprintf(stderr, "failed to allocate Go-like heap at %p; errno %d\n", + go_heap, errno); + return 1; + } char *buf = (char*)((unsigned long)buf0 + (64<<10) - 1 & ~((64<<10) - 1)); __tsan_map_shadow(buf, 4096); __tsan_malloc(thr0, (char*)&barfoo + 1, buf, 10); diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc index d7a9e0b67962..5f2507b7df1d 100644 --- a/lib/tsan/go/tsan_go.cc +++ b/lib/tsan/go/tsan_go.cc @@ -282,11 +282,3 @@ void __tsan_report_count(u64 *pn) { } // extern "C" } // namespace __tsan - -namespace __sanitizer { - -void SymbolizerPrepareForSandboxing() { - // Nothing to do here for Go. -} - -} // namespace __sanitizer diff --git a/lib/tsan/rtl/tsan_debugging.cc b/lib/tsan/rtl/tsan_debugging.cc index a44b13632c61..9e5465d37557 100644 --- a/lib/tsan/rtl/tsan_debugging.cc +++ b/lib/tsan/rtl/tsan_debugging.cc @@ -83,6 +83,13 @@ int __tsan_get_report_data(void *report, const char **description, int *count, } SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_get_report_tag(void *report, uptr *tag) { + const ReportDesc *rep = (ReportDesc *)report; + *tag = rep->tag; + return 1; +} + +SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_report_stack(void *report, uptr idx, void **trace, uptr trace_size) { const ReportDesc *rep = (ReportDesc *)report; diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc index d92976ad6e9e..901997b3e85d 100644 --- a/lib/tsan/rtl/tsan_interceptors.cc +++ b/lib/tsan/rtl/tsan_interceptors.cc @@ -569,10 +569,8 @@ TSAN_INTERCEPTOR(int, sigsetjmp, void *env); #define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname) #define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname) -#define TSAN_STRING_SETJMP_(x) # x -#define TSAN_STRING_SETJMP__(x) TSAN_STRING_SETJMP_(x) -#define TSAN_STRING_SETJMP TSAN_STRING_SETJMP__(setjmp_symname) -#define 
TSAN_STRING_SIGSETJMP TSAN_STRING_SETJMP__(sigsetjmp_symname) +#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname) +#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname) // Not called. Merely to satisfy TSAN_INTERCEPT(). extern "C" SANITIZER_INTERFACE_ATTRIBUTE @@ -597,7 +595,7 @@ extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) { return 0; } -#if !SANITIEZER_NETBSD +#if !SANITIZER_NETBSD extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __interceptor___sigsetjmp(void *env); extern "C" int __interceptor___sigsetjmp(void *env) { @@ -609,13 +607,13 @@ extern "C" int __interceptor___sigsetjmp(void *env) { extern "C" int setjmp_symname(void *env); extern "C" int _setjmp(void *env); extern "C" int sigsetjmp_symname(void *env); -#if !SANITIEZER_NETBSD +#if !SANITIZER_NETBSD extern "C" int __sigsetjmp(void *env); #endif DEFINE_REAL(int, setjmp_symname, void *env) DEFINE_REAL(int, _setjmp, void *env) DEFINE_REAL(int, sigsetjmp_symname, void *env) -#if !SANITIEZER_NETBSD +#if !SANITIZER_NETBSD DEFINE_REAL(int, __sigsetjmp, void *env) #endif #endif // SANITIZER_MAC @@ -762,16 +760,14 @@ static bool fix_mmap_addr(void **addr, long_t sz, int flags) { return true; } -TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, - int fd, OFF_T off) { - SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off); - if (!fix_mmap_addr(&addr, sz, flags)) - return MAP_FAILED; - void *res = REAL(mmap)(addr, sz, prot, flags, fd, off); +template <class Mmap> +static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap, + void *addr, SIZE_T sz, int prot, int flags, + int fd, OFF64_T off) { + if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED; + void *res = real_mmap(addr, sz, prot, flags, fd, off); if (res != MAP_FAILED) { - if (fd > 0) - FdAccess(thr, pc, fd); - + if (fd > 0) FdAccess(thr, pc, fd); if (thr->ignore_reads_and_writes == 0) MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); else @@ -780,29 +776,6 @@ 
TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, return res; } -#if SANITIZER_LINUX -TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, - int fd, OFF64_T off) { - SCOPED_TSAN_INTERCEPTOR(mmap64, addr, sz, prot, flags, fd, off); - if (!fix_mmap_addr(&addr, sz, flags)) - return MAP_FAILED; - void *res = REAL(mmap64)(addr, sz, prot, flags, fd, off); - if (res != MAP_FAILED) { - if (fd > 0) - FdAccess(thr, pc, fd); - - if (thr->ignore_reads_and_writes == 0) - MemoryRangeImitateWrite(thr, pc, (uptr)res, sz); - else - MemoryResetRange(thr, pc, (uptr)res, sz); - } - return res; -} -#define TSAN_MAYBE_INTERCEPT_MMAP64 TSAN_INTERCEPT(mmap64) -#else -#define TSAN_MAYBE_INTERCEPT_MMAP64 -#endif - TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) { SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz); if (sz != 0) { @@ -932,7 +905,7 @@ void DestroyThreadState() { } } // namespace __tsan -#if !SANITIZER_MAC && !SANITIZER_NETBSD +#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD static void thread_finalize(void *v) { uptr iter = (uptr)v; if (iter > 1) { @@ -963,7 +936,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) { ThreadState *thr = cur_thread(); // Thread-local state is not initialized yet. 
ScopedIgnoreInterceptors ignore; -#if !SANITIZER_MAC && !SANITIZER_NETBSD +#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD ThreadIgnoreBegin(thr, 0); if (pthread_setspecific(interceptor_ctx()->finalize_key, (void *)GetPthreadDestructorIterations())) { @@ -1763,12 +1736,6 @@ TSAN_INTERCEPTOR(void, abort, int fake) { REAL(abort)(fake); } -TSAN_INTERCEPTOR(int, puts, const char *s) { - SCOPED_TSAN_INTERCEPTOR(puts, s); - MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s), false); - return REAL(puts)(s); -} - TSAN_INTERCEPTOR(int, rmdir, char *path) { SCOPED_TSAN_INTERCEPTOR(rmdir, path); Release(thr, pc, Dir2addr(path)); @@ -2313,6 +2280,13 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, (uptr)m) +#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \ + off) \ + do { \ + return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \ + off); \ + } while (false) + #if !SANITIZER_MAC #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \ @@ -2426,7 +2400,7 @@ struct ScopedSyscall { } }; -#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD +#if !SANITIZER_FREEBSD && !SANITIZER_MAC static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) { TSAN_SYSCALL(); MemoryAccessRange(thr, pc, p, s, write); @@ -2519,6 +2493,7 @@ static void syscall_post_fork(uptr pc, int pid) { syscall_post_fork(GET_CALLER_PC(), res) #include "sanitizer_common/sanitizer_common_syscalls.inc" +#include "sanitizer_common/sanitizer_syscalls_netbsd.inc" #ifdef NEED_TLS_GET_ADDR // Define own interceptor instead of sanitizer_common's for three reasons: @@ -2536,7 +2511,8 @@ TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) { ThreadState *thr = cur_thread(); if (!thr) return res; - DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size); + DTLS::DTV *dtv = 
DTLS_on_tls_get_addr(arg, res, thr->tls_addr, + thr->tls_addr + thr->tls_size); if (!dtv) return res; // New DTLS block has been allocated. @@ -2556,22 +2532,33 @@ TSAN_INTERCEPTOR(void, _lwp_exit) { #define TSAN_MAYBE_INTERCEPT__LWP_EXIT #endif -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m); -TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)()); +#if SANITIZER_FREEBSD +TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) { + SCOPED_TSAN_INTERCEPTOR(thr_exit, state); + DestroyThreadState(); + REAL(thr_exit(state)); +} +#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit) +#else +#define TSAN_MAYBE_INTERCEPT_THR_EXIT +#endif + +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m) 
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m) +TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)()) namespace __tsan { @@ -2635,8 +2622,6 @@ void InitializeInterceptors() { TSAN_INTERCEPT(realloc); TSAN_INTERCEPT(free); TSAN_INTERCEPT(cfree); - TSAN_INTERCEPT(mmap); - TSAN_MAYBE_INTERCEPT_MMAP64; TSAN_INTERCEPT(munmap); TSAN_MAYBE_INTERCEPT_MEMALIGN; TSAN_INTERCEPT(valloc); @@ -2715,10 +2700,7 @@ void InitializeInterceptors() { TSAN_INTERCEPT(unlink); TSAN_INTERCEPT(tmpfile); TSAN_MAYBE_INTERCEPT_TMPFILE64; - TSAN_INTERCEPT(fread); - TSAN_INTERCEPT(fwrite); TSAN_INTERCEPT(abort); - TSAN_INTERCEPT(puts); TSAN_INTERCEPT(rmdir); TSAN_INTERCEPT(closedir); @@ -2750,6 +2732,7 @@ void InitializeInterceptors() { #endif TSAN_MAYBE_INTERCEPT__LWP_EXIT; + TSAN_MAYBE_INTERCEPT_THR_EXIT; #if !SANITIZER_MAC && !SANITIZER_ANDROID // Need to setup it, because interceptors check that the function is resolved. 
@@ -2762,7 +2745,7 @@ void InitializeInterceptors() { Die(); } -#if !SANITIZER_MAC && !SANITIZER_NETBSD +#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) { Printf("ThreadSanitizer: failed to create thread key\n"); Die(); diff --git a/lib/tsan/rtl/tsan_interceptors_mac.cc b/lib/tsan/rtl/tsan_interceptors_mac.cc index 3b3a4a2815c3..b58e6b7071cf 100644 --- a/lib/tsan/rtl/tsan_interceptors_mac.cc +++ b/lib/tsan/rtl/tsan_interceptors_mac.cc @@ -294,6 +294,43 @@ TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) { #endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>) +// Is the Obj-C object a tagged pointer (i.e. isn't really a valid pointer and +// contains data in the pointers bits instead)? +static bool IsTaggedObjCPointer(void *obj) { + const uptr kPossibleTaggedBits = 0x8000000000000001ull; + return ((uptr)obj & kPossibleTaggedBits) != 0; +} + +// Return an address on which we can synchronize (Acquire and Release) for a +// Obj-C tagged pointer (which is not a valid pointer). Ideally should be a +// derived address from 'obj', but for now just return the same global address. +// TODO(kubamracek): Return different address for different pointers. +static uptr SyncAddressForTaggedPointer(void *obj) { + (void)obj; + static u64 addr; + return (uptr)&addr; +} + +// Address on which we can synchronize for an Objective-C object. Supports +// tagged pointers. 
+static uptr SyncAddressForObjCObject(void *obj) { + if (IsTaggedObjCPointer(obj)) return SyncAddressForTaggedPointer(obj); + return (uptr)obj; +} + +TSAN_INTERCEPTOR(int, objc_sync_enter, void *obj) { + SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj); + int result = REAL(objc_sync_enter)(obj); + if (obj) Acquire(thr, pc, SyncAddressForObjCObject(obj)); + return result; +} + +TSAN_INTERCEPTOR(int, objc_sync_exit, void *obj) { + SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj); + if (obj) Release(thr, pc, SyncAddressForObjCObject(obj)); + return REAL(objc_sync_exit)(obj); +} + // On macOS, libc++ is always linked dynamically, so intercepting works the // usual way. #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h index a80a48991cba..203f6b106a96 100644 --- a/lib/tsan/rtl/tsan_interface.h +++ b/lib/tsan/rtl/tsan_interface.h @@ -117,6 +117,19 @@ int __tsan_get_report_data(void *report, const char **description, int *count, int *unique_tid_count, void **sleep_trace, uptr trace_size); +/// Retrieves the "tag" from a report (for external-race report types). External +/// races can be associated with a tag which give them more meaning. For example +/// tag value '1' means "Swift access race". Tag value '0' indicated a plain +/// external race. +/// +/// \param report opaque pointer to the current report (obtained as argument in +/// __tsan_on_report, or from __tsan_get_current_report) +/// \param [out] tag points to storage that will be filled with the tag value +/// +/// \returns non-zero value on success, zero on failure +SANITIZER_INTERFACE_ATTRIBUTE +int __tsan_get_report_tag(void *report, uptr *tag); + // Returns information about stack traces included in the report. 
SANITIZER_INTERFACE_ATTRIBUTE int __tsan_get_report_stack(void *report, uptr idx, void **trace, diff --git a/lib/tsan/rtl/tsan_malloc_mac.cc b/lib/tsan/rtl/tsan_malloc_mac.cc index 455c95df6c50..3cc30724da85 100644 --- a/lib/tsan/rtl/tsan_malloc_mac.cc +++ b/lib/tsan/rtl/tsan_malloc_mac.cc @@ -15,6 +15,7 @@ #include "sanitizer_common/sanitizer_platform.h" #if SANITIZER_MAC +#include "sanitizer_common/sanitizer_errno.h" #include "tsan_interceptors.h" #include "tsan_stack_trace.h" @@ -39,6 +40,15 @@ using namespace __tsan; if (cur_thread()->in_symbolizer) return InternalCalloc(count, size); \ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \ void *p = user_calloc(thr, pc, size, count) +#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \ + if (cur_thread()->in_symbolizer) { \ + void *p = InternalAlloc(size, nullptr, alignment); \ + if (!p) return errno_ENOMEM; \ + *memptr = p; \ + return 0; \ + } \ + SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, alignment, size); \ + int res = user_posix_memalign(thr, pc, memptr, alignment, size); #define COMMON_MALLOC_VALLOC(size) \ if (cur_thread()->in_symbolizer) \ return InternalAlloc(size, nullptr, GetPageSizeCached()); \ diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc index 19680238bf76..b160a9736d5d 100644 --- a/lib/tsan/rtl/tsan_mman.cc +++ b/lib/tsan/rtl/tsan_mman.cc @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_allocator_checks.h" #include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_placement_new.h" @@ -150,13 +151,24 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) { OutputReport(thr, rep); } +static constexpr uptr kMaxAllowedMallocSize = 1ull << 40; + void *user_alloc_internal(ThreadState *thr, uptr pc, 
uptr sz, uptr align, bool signal) { - if ((sz >= (1ull << 40)) || (align >= (1ull << 40))) - return Allocator::FailureHandler::OnBadRequest(); + if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) { + if (AllocatorMayReturnNull()) + return nullptr; + GET_STACK_TRACE_FATAL(thr, pc); + ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack); + } void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align); - if (UNLIKELY(p == 0)) - return 0; + if (UNLIKELY(!p)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + GET_STACK_TRACE_FATAL(thr, pc); + ReportOutOfMemory(sz, &stack); + } if (ctx && ctx->initialized) OnUserAlloc(thr, pc, (uptr)p, sz, true); if (signal) @@ -178,8 +190,12 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz) { } void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) { - if (UNLIKELY(CheckForCallocOverflow(size, n))) - return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest()); + if (UNLIKELY(CheckForCallocOverflow(size, n))) { + if (AllocatorMayReturnNull()) + return SetErrnoOnNull(nullptr); + GET_STACK_TRACE_FATAL(thr, pc); + ReportCallocOverflow(n, size, &stack); + } void *p = user_alloc_internal(thr, pc, n * size); if (p) internal_memset(p, 0, n * size); @@ -224,7 +240,10 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) { void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) { if (UNLIKELY(!IsPowerOfTwo(align))) { errno = errno_EINVAL; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + GET_STACK_TRACE_FATAL(thr, pc); + ReportInvalidAllocationAlignment(align, &stack); } return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align)); } @@ -232,11 +251,14 @@ void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) { int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align, uptr sz) { if (UNLIKELY(!CheckPosixMemalignAlignment(align))) { - 
Allocator::FailureHandler::OnBadRequest(); - return errno_EINVAL; + if (AllocatorMayReturnNull()) + return errno_EINVAL; + GET_STACK_TRACE_FATAL(thr, pc); + ReportInvalidPosixMemalignAlignment(align, &stack); } void *ptr = user_alloc_internal(thr, pc, sz, align); if (UNLIKELY(!ptr)) + // OOM error is already taken care of by user_alloc_internal. return errno_ENOMEM; CHECK(IsAligned((uptr)ptr, align)); *memptr = ptr; @@ -246,7 +268,10 @@ int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align, void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) { if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) { errno = errno_EINVAL; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + GET_STACK_TRACE_FATAL(thr, pc); + ReportInvalidAlignedAllocAlignment(sz, align, &stack); } return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align)); } @@ -259,7 +284,10 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) { uptr PageSize = GetPageSizeCached(); if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) { errno = errno_ENOMEM; - return Allocator::FailureHandler::OnBadRequest(); + if (AllocatorMayReturnNull()) + return nullptr; + GET_STACK_TRACE_FATAL(thr, pc); + ReportPvallocOverflow(sz, &stack); } // pvalloc(0) should allocate one page. sz = sz ? 
RoundUpTo(sz, PageSize) : PageSize; diff --git a/lib/tsan/rtl/tsan_new_delete.cc b/lib/tsan/rtl/tsan_new_delete.cc index a1bb22690e0f..4f52d3d71eb6 100644 --- a/lib/tsan/rtl/tsan_new_delete.cc +++ b/lib/tsan/rtl/tsan_new_delete.cc @@ -13,8 +13,10 @@ //===----------------------------------------------------------------------===// #include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_report.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "tsan_interceptors.h" +#include "tsan_rtl.h" using namespace __tsan; // NOLINT @@ -34,7 +36,10 @@ DECLARE_REAL(void, free, void *ptr) { \ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \ p = user_alloc(thr, pc, size); \ - if (!nothrow && UNLIKELY(!p)) DieOnFailure::OnOOM(); \ + if (!nothrow && UNLIKELY(!p)) { \ + GET_STACK_TRACE_FATAL(thr, pc); \ + ReportOutOfMemory(size, &stack); \ + } \ } \ invoke_malloc_hook(p, size); \ return p; @@ -46,7 +51,10 @@ DECLARE_REAL(void, free, void *ptr) { \ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \ p = user_memalign(thr, pc, (uptr)align, size); \ - if (!nothrow && UNLIKELY(!p)) DieOnFailure::OnOOM(); \ + if (!nothrow && UNLIKELY(!p)) { \ + GET_STACK_TRACE_FATAL(thr, pc); \ + ReportOutOfMemory(size, &stack); \ + } \ } \ invoke_malloc_hook(p, size); \ return p; diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h index bfac70df12d9..6b3e6bac27cb 100644 --- a/lib/tsan/rtl/tsan_platform.h +++ b/lib/tsan/rtl/tsan_platform.h @@ -79,25 +79,27 @@ struct Mapping { #define TSAN_MID_APP_RANGE 1 #elif defined(__mips64) /* -C/C++ on linux/mips64 -0100 0000 00 - 0200 0000 00: main binary -0200 0000 00 - 1400 0000 00: - -1400 0000 00 - 2400 0000 00: shadow -2400 0000 00 - 3000 0000 00: - -3000 0000 00 - 4000 0000 00: metainfo (memory blocks and sync objects) -4000 0000 00 - 6000 0000 00: - -6000 0000 00 - 6200 0000 00: traces -6200 0000 00 - fe00 0000 00: - -fe00 0000 00 - ff00 0000 00: heap 
-ff00 0000 00 - ff80 0000 00: - -ff80 0000 00 - ffff ffff ff: modules and main thread stack +C/C++ on linux/mips64 (40-bit VMA) +0000 0000 00 - 0100 0000 00: - (4 GB) +0100 0000 00 - 0200 0000 00: main binary (4 GB) +0200 0000 00 - 2000 0000 00: - (120 GB) +2000 0000 00 - 4000 0000 00: shadow (128 GB) +4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB) +5000 0000 00 - aa00 0000 00: - (360 GB) +aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB) +ab00 0000 00 - b000 0000 00: - (20 GB) +b000 0000 00 - b200 0000 00: traces (8 GB) +b200 0000 00 - fe00 0000 00: - (304 GB) +fe00 0000 00 - ff00 0000 00: heap (4 GB) +ff00 0000 00 - ff80 0000 00: - (2 GB) +ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB) */ struct Mapping { static const uptr kMetaShadowBeg = 0x4000000000ull; static const uptr kMetaShadowEnd = 0x5000000000ull; static const uptr kTraceMemBeg = 0xb000000000ull; static const uptr kTraceMemEnd = 0xb200000000ull; - static const uptr kShadowBeg = 0x2400000000ull; + static const uptr kShadowBeg = 0x2000000000ull; static const uptr kShadowEnd = 0x4000000000ull; static const uptr kHeapMemBeg = 0xfe00000000ull; static const uptr kHeapMemEnd = 0xff00000000ull; @@ -353,9 +355,9 @@ struct Mapping47 { #define TSAN_RUNTIME_VMA 1 #endif -#elif SANITIZER_GO && !SANITIZER_WINDOWS +#elif SANITIZER_GO && !SANITIZER_WINDOWS && defined(__x86_64__) -/* Go on linux, darwin and freebsd +/* Go on linux, darwin and freebsd on x86_64 0000 0000 1000 - 0000 1000 0000: executable 0000 1000 0000 - 00c0 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap @@ -404,6 +406,61 @@ struct Mapping { static const uptr kAppMemEnd = 0x00e000000000ull; }; +#elif SANITIZER_GO && defined(__powerpc64__) + +/* Only Mapping46 and Mapping47 are currently supported for powercp64 on Go. 
*/ + +/* Go on linux/powerpc64 (46-bit VMA) +0000 0000 1000 - 0000 1000 0000: executable +0000 1000 0000 - 00c0 0000 0000: - +00c0 0000 0000 - 00e0 0000 0000: heap +00e0 0000 0000 - 2000 0000 0000: - +2000 0000 0000 - 2380 0000 0000: shadow +2380 0000 0000 - 2400 0000 0000: - +2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects) +3400 0000 0000 - 3600 0000 0000: - +3600 0000 0000 - 3800 0000 0000: traces +3800 0000 0000 - 4000 0000 0000: - +*/ + +struct Mapping46 { + static const uptr kMetaShadowBeg = 0x240000000000ull; + static const uptr kMetaShadowEnd = 0x340000000000ull; + static const uptr kTraceMemBeg = 0x360000000000ull; + static const uptr kTraceMemEnd = 0x380000000000ull; + static const uptr kShadowBeg = 0x200000000000ull; + static const uptr kShadowEnd = 0x238000000000ull; + static const uptr kAppMemBeg = 0x000000001000ull; + static const uptr kAppMemEnd = 0x00e000000000ull; +}; + +/* Go on linux/powerpc64 (47-bit VMA) +0000 0000 1000 - 0000 1000 0000: executable +0000 1000 0000 - 00c0 0000 0000: - +00c0 0000 0000 - 00e0 0000 0000: heap +00e0 0000 0000 - 2000 0000 0000: - +2000 0000 0000 - 3000 0000 0000: shadow +3000 0000 0000 - 3000 0000 0000: - +3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects) +4000 0000 0000 - 6000 0000 0000: - +6000 0000 0000 - 6200 0000 0000: traces +6200 0000 0000 - 8000 0000 0000: - +*/ + +struct Mapping47 { + static const uptr kMetaShadowBeg = 0x300000000000ull; + static const uptr kMetaShadowEnd = 0x400000000000ull; + static const uptr kTraceMemBeg = 0x600000000000ull; + static const uptr kTraceMemEnd = 0x620000000000ull; + static const uptr kShadowBeg = 0x200000000000ull; + static const uptr kShadowEnd = 0x300000000000ull; + static const uptr kAppMemBeg = 0x000000001000ull; + static const uptr kAppMemEnd = 0x00e000000000ull; +}; + +// Indicates the runtime will define the memory regions at runtime. 
+#define TSAN_RUNTIME_VMA 1 + #else # error "Unknown platform" #endif @@ -476,7 +533,9 @@ uptr MappingArchImpl(void) { return 0; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return MappingImpl<Mapping44, Type>(); +#endif case 46: return MappingImpl<Mapping46, Type>(); case 47: return MappingImpl<Mapping47, Type>(); } @@ -631,7 +690,9 @@ bool IsAppMem(uptr mem) { return false; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return IsAppMemImpl<Mapping44>(mem); +#endif case 46: return IsAppMemImpl<Mapping46>(mem); case 47: return IsAppMemImpl<Mapping47>(mem); } @@ -660,7 +721,9 @@ bool IsShadowMem(uptr mem) { return false; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return IsShadowMemImpl<Mapping44>(mem); +#endif case 46: return IsShadowMemImpl<Mapping46>(mem); case 47: return IsShadowMemImpl<Mapping47>(mem); } @@ -689,7 +752,9 @@ bool IsMetaMem(uptr mem) { return false; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return IsMetaMemImpl<Mapping44>(mem); +#endif case 46: return IsMetaMemImpl<Mapping46>(mem); case 47: return IsMetaMemImpl<Mapping47>(mem); } @@ -728,7 +793,9 @@ uptr MemToShadow(uptr x) { return 0; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return MemToShadowImpl<Mapping44>(x); +#endif case 46: return MemToShadowImpl<Mapping46>(x); case 47: return MemToShadowImpl<Mapping47>(x); } @@ -769,7 +836,9 @@ u32 *MemToMeta(uptr x) { return 0; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return MemToMetaImpl<Mapping44>(x); +#endif case 46: return MemToMetaImpl<Mapping46>(x); case 47: return MemToMetaImpl<Mapping47>(x); } @@ -823,7 +892,9 @@ uptr ShadowToMem(uptr s) { return 0; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return ShadowToMemImpl<Mapping44>(s); +#endif case 46: return ShadowToMemImpl<Mapping46>(s); case 47: return 
ShadowToMemImpl<Mapping47>(s); } @@ -860,7 +931,9 @@ uptr GetThreadTrace(int tid) { return 0; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return GetThreadTraceImpl<Mapping44>(tid); +#endif case 46: return GetThreadTraceImpl<Mapping46>(tid); case 47: return GetThreadTraceImpl<Mapping47>(tid); } @@ -892,7 +965,9 @@ uptr GetThreadTraceHeader(int tid) { return 0; #elif defined(__powerpc64__) switch (vmaSize) { +#if !SANITIZER_GO case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid); +#endif case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid); case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid); } diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc index e14d5f575a2e..de989b780883 100644 --- a/lib/tsan/rtl/tsan_platform_linux.cc +++ b/lib/tsan/rtl/tsan_platform_linux.cc @@ -168,11 +168,11 @@ static void MapRodata() { fd_t fd = openrv; // Fill the file with kShadowRodata. const uptr kMarkerSize = 512 * 1024 / sizeof(u64); - InternalScopedBuffer<u64> marker(kMarkerSize); + InternalMmapVector<u64> marker(kMarkerSize); // volatile to prevent insertion of memset for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++) *p = kShadowRodata; - internal_write(fd, marker.data(), marker.size()); + internal_write(fd, marker.data(), marker.size() * sizeof(u64)); // Map the file into memory. 
uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0); @@ -191,8 +191,9 @@ static void MapRodata() { // Assume it's .rodata char *shadow_start = (char *)MemToShadow(segment.start); char *shadow_end = (char *)MemToShadow(segment.end); - for (char *p = shadow_start; p < shadow_end; p += marker.size()) { - internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p), + for (char *p = shadow_start; p < shadow_end; + p += marker.size() * sizeof(u64)) { + internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p), PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0); } } @@ -213,15 +214,23 @@ void InitializePlatformEarly() { #if defined(__aarch64__) if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); - Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize); + Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize); Die(); } #elif defined(__powerpc64__) +# if !SANITIZER_GO if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) { Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); - Printf("FATAL: Found %d - Supported 44, 46, and 47\n", vmaSize); + Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize); Die(); } +# else + if (vmaSize != 46 && vmaSize != 47) { + Printf("FATAL: ThreadSanitizer: unsupported VMA range\n"); + Printf("FATAL: Found %zd - Supported 46, and 47\n", vmaSize); + Die(); + } +# endif #endif #endif } diff --git a/lib/tsan/rtl/tsan_platform_posix.cc b/lib/tsan/rtl/tsan_platform_posix.cc index e4f90a811c35..c38dcc7f2c1c 100644 --- a/lib/tsan/rtl/tsan_platform_posix.cc +++ b/lib/tsan/rtl/tsan_platform_posix.cc @@ -16,6 +16,7 @@ #if SANITIZER_POSIX #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_errno.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "tsan_platform.h" @@ -23,16 +24,39 @@ namespace __tsan { +static const 
char kShadowMemoryMappingWarning[] = + "FATAL: %s can not madvise shadow region [%zx, %zx] with %s (errno: %d)\n"; +static const char kShadowMemoryMappingHint[] = + "HINT: if %s is not supported in your environment, you may set " + "TSAN_OPTIONS=%s=0\n"; + +static void NoHugePagesInShadow(uptr addr, uptr size) { + if (common_flags()->no_huge_pages_for_shadow) + if (!NoHugePagesInRegion(addr, size)) { + Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size, + "MADV_NOHUGEPAGE", errno); + Printf(kShadowMemoryMappingHint, "MADV_NOHUGEPAGE", + "no_huge_pages_for_shadow"); + Die(); + } +} + +static void DontDumpShadow(uptr addr, uptr size) { + if (common_flags()->use_madv_dontdump) + if (!DontDumpShadowMemory(addr, size)) { + Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size, + "MADV_DONTDUMP", errno); + Printf(kShadowMemoryMappingHint, "MADV_DONTDUMP", "use_madv_dontdump"); + Die(); + } +} + #if !SANITIZER_GO void InitializeShadowMemory() { // Map memory shadow. - uptr shadow = - (uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), - "shadow"); - if (shadow != ShadowBeg()) { + if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) { Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); - Printf("FATAL: Make sure to compile with -fPIE and " - "to link with -pie (%p, %p).\n", shadow, ShadowBeg()); + Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n"); Die(); } // This memory range is used for thread stacks and large user mmaps. @@ -74,30 +98,23 @@ void InitializeShadowMemory() { DCHECK(0); } #endif - NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg), + NoHugePagesInShadow(MemToShadow(kMadviseRangeBeg), kMadviseRangeSize * kShadowMultiplier); - // Meta shadow is compressing and we don't flush it, - // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory. - // On one program it reduces memory consumption from 5GB to 2.5GB. 
- NoHugePagesInRegion(MetaShadowBeg(), MetaShadowEnd() - MetaShadowBeg()); - if (common_flags()->use_madv_dontdump) - DontDumpShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg()); + DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg()); DPrintf("memory shadow: %zx-%zx (%zuGB)\n", ShadowBeg(), ShadowEnd(), (ShadowEnd() - ShadowBeg()) >> 30); // Map meta shadow. - uptr meta_size = MetaShadowEnd() - MetaShadowBeg(); - uptr meta = - (uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow"); - if (meta != MetaShadowBeg()) { + const uptr meta = MetaShadowBeg(); + const uptr meta_size = MetaShadowEnd() - meta; + if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) { Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); - Printf("FATAL: Make sure to compile with -fPIE and " - "to link with -pie (%p, %p).\n", meta, MetaShadowBeg()); + Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n"); Die(); } - if (common_flags()->use_madv_dontdump) - DontDumpShadowMemory(meta, meta_size); + NoHugePagesInShadow(meta, meta_size); + DontDumpShadow(meta, meta_size); DPrintf("meta shadow: %zx-%zx (%zuGB)\n", meta, meta + meta_size, meta_size >> 30); diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc index 17b820977c26..5841222927b3 100644 --- a/lib/tsan/rtl/tsan_rtl.cc +++ b/lib/tsan/rtl/tsan_rtl.cc @@ -105,8 +105,8 @@ Context::Context() , racy_stacks() , racy_addresses() , fired_suppressions_mtx(MutexTypeFired, StatMtxFired) - , fired_suppressions(8) , clock_alloc("clock allocator") { + fired_suppressions.reserve(8); } // The objects are allocated in TLS, so one may rely on zero-initialization. 
@@ -140,7 +140,7 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) { uptr n_threads; uptr n_running_threads; ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); - InternalScopedBuffer<char> buf(4096); + InternalMmapVector<char> buf(4096); WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads); WriteToFile(fd, buf.data(), internal_strlen(buf.data())); } @@ -246,7 +246,8 @@ void MapShadow(uptr addr, uptr size) { const uptr kPageSize = GetPageSizeCached(); uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize); uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize); - MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"); + if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow")) + Die(); // Meta shadow is 2:1, so tread carefully. static bool data_mapped = false; @@ -258,7 +259,8 @@ void MapShadow(uptr addr, uptr size) { if (!data_mapped) { // First call maps data+bss. data_mapped = true; - MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); + if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow")) + Die(); } else { // Mapping continous heap. // Windows wants 64K alignment. 
@@ -268,7 +270,8 @@ void MapShadow(uptr addr, uptr size) { return; if (meta_begin < mapped_meta_end) meta_begin = mapped_meta_end; - MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); + if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow")) + Die(); mapped_meta_end = meta_end; } VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n", @@ -280,10 +283,9 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) { CHECK_GE(addr, TraceMemBeg()); CHECK_LE(addr + size, TraceMemEnd()); CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment - uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name); - if (addr1 != addr) { - Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n", - addr, size, addr1); + if (!MmapFixedNoReserve(addr, size, name)) { + Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n", + addr, size); Die(); } } @@ -354,6 +356,7 @@ void Initialize(ThreadState *thr) { ctx = new(ctx_placeholder) Context; const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS"); CacheBinaryName(); + CheckASLR(); InitializeFlags(&ctx->flags, options); AvoidCVE_2016_2143(); InitializePlatformEarly(); @@ -547,6 +550,10 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) { } void TraceSwitch(ThreadState *thr) { +#if !SANITIZER_GO + if (ctx->after_multithreaded_fork) + return; +#endif thr->nomalloc++; Trace *thr_trace = ThreadTrace(thr->tid); Lock l(&thr_trace->mtx); @@ -918,7 +925,8 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, u64 *p1 = p; p = RoundDown(end, kPageSize); UnmapOrDie((void*)p1, (uptr)p - (uptr)p1); - MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1); + if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1)) + Die(); // Set the ending. 
while (p < end) { *p++ = val; diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h index 7d44ccae9893..523b69aaa6bc 100644 --- a/lib/tsan/rtl/tsan_rtl.h +++ b/lib/tsan/rtl/tsan_rtl.h @@ -520,7 +520,9 @@ struct Context { Context(); bool initialized; +#if !SANITIZER_GO bool after_multithreaded_fork; +#endif MetaMap metamap; @@ -648,6 +650,10 @@ void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack, ExtractTagFromStack(stack, tag); } +#define GET_STACK_TRACE_FATAL(thr, pc) \ + VarSizeStackTrace stack; \ + ObtainCurrentStack(thr, pc, &stack); \ + stack.ReverseOrder(); #if TSAN_COLLECT_STATS void StatAggregate(u64 *dst, u64 *src); diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc index 152b965ad535..c61d02b7a766 100644 --- a/lib/tsan/rtl/tsan_rtl_mutex.cc +++ b/lib/tsan/rtl/tsan_rtl_mutex.cc @@ -104,7 +104,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { unlock_locked = true; } u64 mid = s->GetId(); - u32 last_lock = s->last_lock; + u64 last_lock = s->last_lock; if (!unlock_locked) s->Reset(thr->proc()); // must not reset it before the report is printed s->mtx.Unlock(); @@ -114,7 +114,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { rep.AddMutex(mid); VarSizeStackTrace trace; ObtainCurrentStack(thr, pc, &trace); - rep.AddStack(trace); + rep.AddStack(trace, true); FastState last(last_lock); RestoreStack(last.tid(), last.epoch(), &trace, 0); rep.AddStack(trace, true); @@ -361,7 +361,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { if (s->recursion == 0) { StatInc(thr, StatMutexUnlock); s->owner_tid = SyncVar::kInvalidTid; - ReleaseImpl(thr, pc, &s->clock); + ReleaseStoreImpl(thr, pc, &s->clock); } else { StatInc(thr, StatMutexRecUnlock); } diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc index cc582ab50190..febb6cef2d39 100644 --- a/lib/tsan/rtl/tsan_rtl_report.cc +++ b/lib/tsan/rtl/tsan_rtl_report.cc @@ -649,8 
+649,8 @@ void ReportRace(ThreadState *thr) { // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit // event. Later we subtract -1 from it (in GetPreviousInstructionPc) // and the resulting PC has kExternalPCBit set, so we pass it to - // __tsan_symbolize_external. __tsan_symbolize_external is within its rights - // to crash since the PC is completely bogus. + // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its + // rights to crash since the PC is completely bogus. // test/tsan/double_race.cc contains a test case for this. toppc = 0; } diff --git a/lib/tsan/rtl/tsan_stack_trace.cc b/lib/tsan/rtl/tsan_stack_trace.cc index ceca3f8e8738..a0dee19e246e 100644 --- a/lib/tsan/rtl/tsan_stack_trace.cc +++ b/lib/tsan/rtl/tsan_stack_trace.cc @@ -43,4 +43,9 @@ void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) { trace_buffer[cnt] = extra_top_pc; } +void VarSizeStackTrace::ReverseOrder() { + for (u32 i = 0; i < (size >> 1); i++) + Swap(trace_buffer[i], trace_buffer[size - 1 - i]); +} + } // namespace __tsan diff --git a/lib/tsan/rtl/tsan_stack_trace.h b/lib/tsan/rtl/tsan_stack_trace.h index 5bf89bb7584f..f69b57464bcf 100644 --- a/lib/tsan/rtl/tsan_stack_trace.h +++ b/lib/tsan/rtl/tsan_stack_trace.h @@ -27,6 +27,10 @@ struct VarSizeStackTrace : public StackTrace { ~VarSizeStackTrace(); void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0); + // Reverses the current stack trace order, the top frame goes to the bottom, + // the last frame goes to the top. 
+ void ReverseOrder(); + private: void ResizeBuffer(uptr new_size); diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc index e39702b7d22a..be38f331ad96 100644 --- a/lib/tsan/rtl/tsan_suppressions.cc +++ b/lib/tsan/rtl/tsan_suppressions.cc @@ -152,7 +152,7 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) { } void PrintMatchedSuppressions() { - InternalMmapVector<Suppression *> matched(1); + InternalMmapVector<Suppression *> matched; CHECK(suppression_ctx); suppression_ctx->GetMatched(&matched); if (!matched.size()) diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc index b2423951795f..27f0e01c7fbe 100644 --- a/lib/tsan/rtl/tsan_symbolize.cc +++ b/lib/tsan/rtl/tsan_symbolize.cc @@ -36,6 +36,7 @@ void ExitSymbolizer() { thr->ignore_interceptors--; } +// Legacy API. // May be overriden by JIT/JAVA/etc, // whatever produces PCs marked with kExternalPCBit. SANITIZER_WEAK_DEFAULT_IMPL @@ -45,9 +46,49 @@ bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz, return false; } +// New API: call __tsan_symbolize_external_ex only when it exists. +// Once old clients are gone, provide dummy implementation. 
+SANITIZER_WEAK_DEFAULT_IMPL +void __tsan_symbolize_external_ex(uptr pc, + void (*add_frame)(void *, const char *, + const char *, int, int), + void *ctx) {} + +struct SymbolizedStackBuilder { + SymbolizedStack *head; + SymbolizedStack *tail; + uptr addr; +}; + +static void AddFrame(void *ctx, const char *function_name, const char *file, + int line, int column) { + SymbolizedStackBuilder *ssb = (struct SymbolizedStackBuilder *)ctx; + if (ssb->tail) { + ssb->tail->next = SymbolizedStack::New(ssb->addr); + ssb->tail = ssb->tail->next; + } else { + ssb->head = ssb->tail = SymbolizedStack::New(ssb->addr); + } + AddressInfo *info = &ssb->tail->info; + if (function_name) { + info->function = internal_strdup(function_name); + } + if (file) { + info->file = internal_strdup(file); + } + info->line = line; + info->column = column; +} + SymbolizedStack *SymbolizeCode(uptr addr) { // Check if PC comes from non-native land. if (addr & kExternalPCBit) { + SymbolizedStackBuilder ssb = {nullptr, nullptr, addr}; + __tsan_symbolize_external_ex(addr, AddFrame, &ssb); + if (ssb.head) + return ssb.head; + // Legacy code: remove along with the declaration above + // once all clients using this API are gone. // Declare static to not consume too much stack space. // We symbolize reports in a single thread, so this is fine. 
static char func_buf[1024]; diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc index 44ae558fa1b2..ba3953375ee9 100644 --- a/lib/tsan/rtl/tsan_sync.cc +++ b/lib/tsan/rtl/tsan_sync.cc @@ -176,7 +176,8 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) { uptr metap = (uptr)MemToMeta(p0); uptr metasz = sz0 / kMetaRatio; UnmapOrDie((void*)metap, metasz); - MmapFixedNoReserve(metap, metasz); + if (!MmapFixedNoReserve(metap, metasz)) + Die(); } MBlock* MetaMap::GetBlock(uptr p) { diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt index ad8d02ed331f..352319f10109 100644 --- a/lib/tsan/tests/CMakeLists.txt +++ b/lib/tsan/tests/CMakeLists.txt @@ -22,6 +22,8 @@ if(APPLE) $<TARGET_OBJECTS:RTInterception.osx> $<TARGET_OBJECTS:RTSanitizerCommon.osx> $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx> + $<TARGET_OBJECTS:RTSanitizerCommonCoverage.osx> + $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.osx> $<TARGET_OBJECTS:RTUbsan.osx>) set(TSAN_TEST_RUNTIME RTTsanTest) add_library(${TSAN_TEST_RUNTIME} STATIC ${TSAN_TEST_RUNTIME_OBJECTS}) @@ -32,6 +34,7 @@ if(APPLE) list(APPEND TSAN_UNITTEST_CFLAGS ${DARWIN_osx_CFLAGS}) set(LINK_FLAGS "-lc++") + list(APPEND LINK_FLAGS ${DARWIN_osx_LINK_FLAGS}) add_weak_symbols("ubsan" LINK_FLAGS) add_weak_symbols("sanitizer_common" LINK_FLAGS) else() diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc index 05ae4286704c..26e13a55ce1c 100644 --- a/lib/tsan/tests/unit/tsan_mman_test.cc +++ b/lib/tsan/tests/unit/tsan_mman_test.cc @@ -153,29 +153,12 @@ TEST(Mman, Valloc) { EXPECT_NE(p, (void*)0); EXPECT_EQ(page_size, __sanitizer_get_allocated_size(p)); user_free(thr, 0, p); - - EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-(page_size - 1)), - "allocator is terminating the process instead of returning 0"); - EXPECT_DEATH(p = user_pvalloc(thr, 0, (uptr)-1), - "allocator is terminating the process instead of returning 0"); } #if !SANITIZER_DEBUG // EXPECT_DEATH clones a thread 
with 4K stack, // which is overflown by tsan memory accesses functions in debug mode. -TEST(Mman, CallocOverflow) { - ThreadState *thr = cur_thread(); - uptr pc = 0; - size_t kArraySize = 4096; - volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max(); - volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10; - volatile void *p = NULL; - EXPECT_DEATH(p = user_calloc(thr, pc, kArraySize, kArraySize2), - "allocator is terminating the process instead of returning 0"); - EXPECT_EQ(0L, p); -} - TEST(Mman, Memalign) { ThreadState *thr = cur_thread(); @@ -183,12 +166,16 @@ TEST(Mman, Memalign) { EXPECT_NE(p, (void*)0); user_free(thr, 0, p); + // TODO(alekseyshl): Remove this death test when memalign is verified by + // tests in sanitizer_common. p = NULL; EXPECT_DEATH(p = user_memalign(thr, 0, 7, 100), - "allocator is terminating the process instead of returning 0"); + "invalid-allocation-alignment"); EXPECT_EQ(0L, p); } +#endif + TEST(Mman, PosixMemalign) { ThreadState *thr = cur_thread(); @@ -197,16 +184,6 @@ TEST(Mman, PosixMemalign) { EXPECT_NE(p, (void*)0); EXPECT_EQ(res, 0); user_free(thr, 0, p); - - p = NULL; - // Alignment is not a power of two, although is a multiple of sizeof(void*). - EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 3 * sizeof(p), 100), - "allocator is terminating the process instead of returning 0"); - EXPECT_EQ(0L, p); - // Alignment is not a multiple of sizeof(void*), although is a power of 2. - EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 2, 100), - "allocator is terminating the process instead of returning 0"); - EXPECT_EQ(0L, p); } TEST(Mman, AlignedAlloc) { @@ -215,18 +192,6 @@ TEST(Mman, AlignedAlloc) { void *p = user_aligned_alloc(thr, 0, 8, 64); EXPECT_NE(p, (void*)0); user_free(thr, 0, p); - - p = NULL; - // Alignement is not a power of 2. 
- EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 7, 100), - "allocator is terminating the process instead of returning 0"); - EXPECT_EQ(0L, p); - // Size is not a multiple of alignment. - EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 8, 100), - "allocator is terminating the process instead of returning 0"); - EXPECT_EQ(0L, p); } -#endif - } // namespace __tsan diff --git a/lib/ubsan/CMakeLists.txt b/lib/ubsan/CMakeLists.txt index ea4f6e895ec0..ab118ae4a521 100644 --- a/lib/ubsan/CMakeLists.txt +++ b/lib/ubsan/CMakeLists.txt @@ -5,21 +5,35 @@ set(UBSAN_SOURCES ubsan_init.cc ubsan_flags.cc ubsan_handlers.cc - ubsan_value.cc - ) + ubsan_monitor.cc + ubsan_value.cc) set(UBSAN_STANDALONE_SOURCES ubsan_diag_standalone.cc ubsan_init_standalone.cc - ubsan_signals_standalone.cc - ) + ubsan_signals_standalone.cc) set(UBSAN_CXXABI_SOURCES ubsan_handlers_cxx.cc ubsan_type_hash.cc ubsan_type_hash_itanium.cc - ubsan_type_hash_win.cc - ) + ubsan_type_hash_win.cc) + +set(UBSAN_HEADERS + ubsan_checks.inc + ubsan_diag.h + ubsan_flags.h + ubsan_flags.inc + ubsan_handlers.h + ubsan_handlers_cxx.h + ubsan_init.h + ubsan_interface.inc + ubsan_monitor.h + ubsan_platform.h + ubsan_signals_standalone.h + ubsan_type_hash.h + ubsan_value.h +) include_directories(..) 
@@ -55,6 +69,7 @@ if(APPLE) OS ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${UBSAN_COMMON_SUPPORTED_ARCH} SOURCES ${UBSAN_COMMON_SOURCES} + ADDITIONAL_HEADERS ${UBSAN_HEADERS} CFLAGS ${UBSAN_CXXFLAGS}) if(COMPILER_RT_HAS_UBSAN) @@ -63,6 +78,7 @@ if(APPLE) OS ${SANITIZER_COMMON_SUPPORTED_OS} ARCHS ${UBSAN_SUPPORTED_ARCH} SOURCES ${UBSAN_STANDALONE_SOURCES} + ADDITIONAL_HEADERS ${UBSAN_HEADERS} CFLAGS ${UBSAN_STANDALONE_CFLAGS}) add_weak_symbols("ubsan" WEAK_SYMBOL_LINK_FLAGS) @@ -76,6 +92,8 @@ if(APPLE) RTUbsan_standalone RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer RTInterception LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS} PARENT_TARGET ubsan) @@ -88,6 +106,8 @@ if(APPLE) RTUbsan_standalone RTSanitizerCommonNoHooks RTSanitizerCommonLibcNoHooks + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizerNoHooks RTInterception LINK_FLAGS ${WEAK_SYMBOL_LINK_FLAGS} PARENT_TARGET ubsan) @@ -97,7 +117,9 @@ else() # Common parts of UBSan runtime. add_compiler_rt_object_libraries(RTUbsan ARCHS ${UBSAN_COMMON_SUPPORTED_ARCH} - SOURCES ${UBSAN_SOURCES} CFLAGS ${UBSAN_CFLAGS}) + SOURCES ${UBSAN_SOURCES} + ADDITIONAL_HEADERS ${UBSAN_HEADERS} + CFLAGS ${UBSAN_CFLAGS}) if(SANITIZER_CAN_USE_CXXABI) # C++-specific parts of UBSan runtime. Requires a C++ ABI library. @@ -110,7 +132,9 @@ else() add_compiler_rt_object_libraries(RTUbsan_cxx ARCHS ${UBSAN_COMMON_SUPPORTED_ARCH} - SOURCES ${UBSAN_CXX_SOURCES} CFLAGS ${UBSAN_CXXFLAGS}) + SOURCES ${UBSAN_CXX_SOURCES} + ADDITIONAL_HEADERS ${UBSAN_HEADERS} + CFLAGS ${UBSAN_CXXFLAGS}) if (WIN32) add_compiler_rt_object_libraries(UbsanWeakInterception @@ -145,14 +169,19 @@ else() add_compiler_rt_object_libraries(RTUbsan_standalone ARCHS ${UBSAN_SUPPORTED_ARCH} SOURCES ${UBSAN_STANDALONE_SOURCES} + ADDITIONAL_HEADERS ${UBSAN_HEADERS} CFLAGS ${UBSAN_STANDALONE_CFLAGS}) # Standalone UBSan runtimes. 
add_compiler_rt_runtime(clang_rt.ubsan_standalone STATIC ARCHS ${UBSAN_SUPPORTED_ARCH} + SOURCES ubsan_init_standalone_preinit.cc + ADDITIONAL_HEADERS ${UBSAN_HEADERS} OBJECT_LIBS RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer RTUbsan RTUbsan_standalone RTInterception @@ -166,12 +195,14 @@ else() CFLAGS ${UBSAN_CXXFLAGS} PARENT_TARGET ubsan) - if (UNIX) + if (FUCHSIA OR UNIX) add_compiler_rt_runtime(clang_rt.ubsan_standalone SHARED ARCHS ${UBSAN_SUPPORTED_ARCH} OBJECT_LIBS RTSanitizerCommon RTSanitizerCommonLibc + RTSanitizerCommonCoverage + RTSanitizerCommonSymbolizer RTUbsan RTUbsan_cxx RTUbsan_standalone diff --git a/lib/ubsan/ubsan_diag.cc b/lib/ubsan/ubsan_diag.cc index d5b68407bb37..df4f13cbe2f5 100644 --- a/lib/ubsan/ubsan_diag.cc +++ b/lib/ubsan/ubsan_diag.cc @@ -16,6 +16,7 @@ #include "ubsan_diag.h" #include "ubsan_init.h" #include "ubsan_flags.h" +#include "ubsan_monitor.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stacktrace.h" @@ -340,6 +341,13 @@ Diag::~Diag() { Decorator Decor; InternalScopedString Buffer(1024); + // Prepare a report that a monitor process can inspect. + if (Level == DL_Error) { + RenderText(&Buffer, Message, Args); + UndefinedBehaviorReport UBR{ConvertTypeToString(ET), Loc, Buffer}; + Buffer.clear(); + } + Buffer.append(Decor.Bold()); RenderLocation(&Buffer, Loc); Buffer.append(":"); diff --git a/lib/ubsan/ubsan_diag.h b/lib/ubsan/ubsan_diag.h index 7370b1b62421..d9ea410c358c 100644 --- a/lib/ubsan/ubsan_diag.h +++ b/lib/ubsan/ubsan_diag.h @@ -121,6 +121,12 @@ public: const char *getName() const { return Name; } }; +enum class ErrorType { +#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) Name, +#include "ubsan_checks.inc" +#undef UBSAN_CHECK +}; + /// \brief Representation of an in-flight diagnostic. 
/// /// Temporary \c Diag instances are created by the handler routines to @@ -133,6 +139,9 @@ class Diag { /// The diagnostic level. DiagLevel Level; + /// The error type. + ErrorType ET; + /// The message which will be emitted, with %0, %1, ... placeholders for /// arguments. const char *Message; @@ -197,8 +206,9 @@ private: Diag &operator=(const Diag &); public: - Diag(Location Loc, DiagLevel Level, const char *Message) - : Loc(Loc), Level(Level), Message(Message), NumArgs(0), NumRanges(0) {} + Diag(Location Loc, DiagLevel Level, ErrorType ET, const char *Message) + : Loc(Loc), Level(Level), ET(ET), Message(Message), NumArgs(0), + NumRanges(0) {} ~Diag(); Diag &operator<<(const char *Str) { return AddArg(Str); } @@ -219,12 +229,6 @@ struct ReportOptions { uptr bp; }; -enum class ErrorType { -#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) Name, -#include "ubsan_checks.inc" -#undef UBSAN_CHECK -}; - bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET); #define GET_REPORT_OPTIONS(unrecoverable_handler) \ diff --git a/lib/ubsan/ubsan_flags.cc b/lib/ubsan/ubsan_flags.cc index c4fb278c8367..7b6784b8278f 100644 --- a/lib/ubsan/ubsan_flags.cc +++ b/lib/ubsan/ubsan_flags.cc @@ -26,6 +26,15 @@ const char *MaybeCallUbsanDefaultOptions() { return (&__ubsan_default_options) ? __ubsan_default_options() : ""; } +static const char *GetFlag(const char *flag) { + // We cannot call getenv() from inside a preinit array initializer + if (SANITIZER_CAN_USE_PREINIT_ARRAY) { + return GetEnv(flag); + } else { + return getenv(flag); + } +} + Flags ubsan_flags; void Flags::SetDefaults() { @@ -47,7 +56,7 @@ void InitializeFlags() { CommonFlags cf; cf.CopyFrom(*common_flags()); cf.print_summary = false; - cf.external_symbolizer_path = getenv("UBSAN_SYMBOLIZER_PATH"); + cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH"); OverrideCommonFlags(cf); } @@ -61,7 +70,7 @@ void InitializeFlags() { // Override from user-specified string. 
parser.ParseString(MaybeCallUbsanDefaultOptions()); // Override from environment variable. - parser.ParseString(getenv("UBSAN_OPTIONS")); + parser.ParseString(GetFlag("UBSAN_OPTIONS")); InitializeCommonFlags(); if (Verbosity()) ReportUnrecognizedFlags(); diff --git a/lib/ubsan/ubsan_flags.inc b/lib/ubsan/ubsan_flags.inc index d171a98e1730..1638a054e8f0 100644 --- a/lib/ubsan/ubsan_flags.inc +++ b/lib/ubsan/ubsan_flags.inc @@ -24,3 +24,6 @@ UBSAN_FLAG(bool, print_stacktrace, false, UBSAN_FLAG(const char *, suppressions, "", "Suppressions file name.") UBSAN_FLAG(bool, report_error_type, false, "Print specific error type instead of 'undefined-behavior' in summary.") +UBSAN_FLAG(bool, silence_unsigned_overflow, false, + "Do not print error reports for unsigned integer overflow. " + "Used to provide fuzzing signal without blowing up logs.") diff --git a/lib/ubsan/ubsan_handlers.cc b/lib/ubsan/ubsan_handlers.cc index bcda746a1783..49c6bcacbe27 100644 --- a/lib/ubsan/ubsan_handlers.cc +++ b/lib/ubsan/ubsan_handlers.cc @@ -15,6 +15,8 @@ #if CAN_SANITIZE_UB #include "ubsan_handlers.h" #include "ubsan_diag.h" +#include "ubsan_flags.h" +#include "ubsan_monitor.h" #include "sanitizer_common/sanitizer_common.h" @@ -70,17 +72,17 @@ static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, switch (ET) { case ErrorType::NullPointerUse: - Diag(Loc, DL_Error, "%0 null pointer of type %1") + Diag(Loc, DL_Error, ET, "%0 null pointer of type %1") << TypeCheckKinds[Data->TypeCheckKind] << Data->Type; break; case ErrorType::MisalignedPointerUse: - Diag(Loc, DL_Error, "%0 misaligned address %1 for type %3, " + Diag(Loc, DL_Error, ET, "%0 misaligned address %1 for type %3, " "which requires %2 byte alignment") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Alignment << Data->Type; break; case ErrorType::InsufficientObjectSize: - Diag(Loc, DL_Error, "%0 address %1 with insufficient space " + Diag(Loc, DL_Error, ET, "%0 address %1 with insufficient space " 
"for an object of type %2") << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Data->Type; break; @@ -89,7 +91,7 @@ static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, } if (Pointer) - Diag(Pointer, DL_Note, "pointer points here"); + Diag(Pointer, DL_Note, ET, "pointer points here"); } void __ubsan::__ubsan_handle_type_mismatch_v1(TypeMismatchData *Data, @@ -117,12 +119,15 @@ static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS, if (ignoreReport(Loc, Opts, ET)) return; + if (!IsSigned && flags()->silence_unsigned_overflow) + return; + ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, "%0 integer overflow: " - "%1 %2 %3 cannot be represented in type %4") - << (IsSigned ? "signed" : "unsigned") - << Value(Data->Type, LHS) << Operator << RHS << Data->Type; + Diag(Loc, DL_Error, ET, "%0 integer overflow: " + "%1 %2 %3 cannot be represented in type %4") + << (IsSigned ? "signed" : "unsigned") << Value(Data->Type, LHS) + << Operator << RHS << Data->Type; } #define UBSAN_OVERFLOW_HANDLER(handler_name, op, unrecoverable) \ @@ -151,15 +156,18 @@ static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal, if (ignoreReport(Loc, Opts, ET)) return; + if (!IsSigned && flags()->silence_unsigned_overflow) + return; + ScopedReport R(Opts, Loc, ET); if (IsSigned) - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "negation of %0 cannot be represented in type %1; " "cast to an unsigned type to negate this value to itself") << Value(Data->Type, OldVal) << Data->Type; else - Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1") + Diag(Loc, DL_Error, ET, "negation of %0 cannot be represented in type %1") << Value(Data->Type, OldVal) << Data->Type; } @@ -196,11 +204,12 @@ static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS, switch (ET) { case ErrorType::SignedIntegerOverflow: - Diag(Loc, DL_Error, "division of %0 by -1 cannot be represented in type %1") + Diag(Loc, DL_Error, 
ET, + "division of %0 by -1 cannot be represented in type %1") << LHSVal << Data->Type; break; default: - Diag(Loc, DL_Error, "division by zero"); + Diag(Loc, DL_Error, ET, "division by zero"); break; } } @@ -239,15 +248,16 @@ static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data, if (ET == ErrorType::InvalidShiftExponent) { if (RHSVal.isNegative()) - Diag(Loc, DL_Error, "shift exponent %0 is negative") << RHSVal; + Diag(Loc, DL_Error, ET, "shift exponent %0 is negative") << RHSVal; else - Diag(Loc, DL_Error, "shift exponent %0 is too large for %1-bit type %2") + Diag(Loc, DL_Error, ET, + "shift exponent %0 is too large for %1-bit type %2") << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType; } else { if (LHSVal.isNegative()) - Diag(Loc, DL_Error, "left shift of negative value %0") << LHSVal; + Diag(Loc, DL_Error, ET, "left shift of negative value %0") << LHSVal; else - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "left shift of %0 by %1 places cannot be represented in type %2") << LHSVal << RHSVal << Data->LHSType; } @@ -279,7 +289,7 @@ static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index, ScopedReport R(Opts, Loc, ET); Value IndexVal(Data->IndexType, Index); - Diag(Loc, DL_Error, "index %0 out of bounds for type %1") + Diag(Loc, DL_Error, ET, "index %0 out of bounds for type %1") << IndexVal << Data->ArrayType; } @@ -297,8 +307,10 @@ void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, static void handleBuiltinUnreachableImpl(UnreachableData *Data, ReportOptions Opts) { - ScopedReport R(Opts, Data->Loc, ErrorType::UnreachableCall); - Diag(Data->Loc, DL_Error, "execution reached an unreachable program point"); + ErrorType ET = ErrorType::UnreachableCall; + ScopedReport R(Opts, Data->Loc, ET); + Diag(Data->Loc, DL_Error, ET, + "execution reached an unreachable program point"); } void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { @@ -308,8 +320,9 @@ void 
__ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { } static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) { - ScopedReport R(Opts, Data->Loc, ErrorType::MissingReturn); - Diag(Data->Loc, DL_Error, + ErrorType ET = ErrorType::MissingReturn; + ScopedReport R(Opts, Data->Loc, ET); + Diag(Data->Loc, DL_Error, ET, "execution reached the end of a value-returning function " "without returning a value"); } @@ -330,9 +343,9 @@ static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound, ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, "variable length array bound evaluates to " - "non-positive value %0") - << Value(Data->Type, Bound); + Diag(Loc, DL_Error, ET, "variable length array bound evaluates to " + "non-positive value %0") + << Value(Data->Type, Bound); } void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data, @@ -390,7 +403,7 @@ static void handleFloatCastOverflow(void *DataPtr, ValueHandle From, ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "%0 is outside the range of representable values of type %2") << Value(*FromType, From) << *FromType << *ToType; } @@ -421,9 +434,9 @@ static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val, ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "load of value %0, which is not a valid value for type %1") - << Value(Data->Type, Val) << Data->Type; + << Value(Data->Type, Val) << Data->Type; } void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data, @@ -447,7 +460,7 @@ static void handleInvalidBuiltin(InvalidBuiltinData *Data, ReportOptions Opts) { ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "passing zero to %0, which is not a valid argument") << ((Data->Kind == BCK_CTZPassedZero) ? 
"ctz()" : "clz()"); } @@ -478,10 +491,10 @@ static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data, if (!FName) FName = "(unknown)"; - Diag(CallLoc, DL_Error, + Diag(CallLoc, DL_Error, ET, "call to function %0 through pointer to incorrect function type %1") << FName << Data->Type; - Diag(FLoc, DL_Note, "%0 defined here") << FName; + Diag(FLoc, DL_Note, ET, "%0 defined here") << FName; } void @@ -511,10 +524,10 @@ static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr, ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, "null pointer returned from function declared to never " - "return null"); + Diag(Loc, DL_Error, ET, + "null pointer returned from function declared to never return null"); if (!Data->AttrLoc.isInvalid()) - Diag(Data->AttrLoc, DL_Note, "%0 specified here") + Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here") << (IsAttr ? "returns_nonnull attribute" : "_Nonnull return type annotation"); } @@ -555,12 +568,12 @@ static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts, ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "null pointer passed as argument %0, which is declared to " "never be null") << Data->ArgIndex; if (!Data->AttrLoc.isInvalid()) - Diag(Data->AttrLoc, DL_Note, "%0 specified here") + Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here") << (IsAttr ? 
"nonnull attribute" : "_Nonnull type annotation"); } @@ -600,14 +613,15 @@ static void handlePointerOverflowImpl(PointerOverflowData *Data, if ((sptr(Base) >= 0) == (sptr(Result) >= 0)) { if (Base > Result) - Diag(Loc, DL_Error, "addition of unsigned offset to %0 overflowed to %1") + Diag(Loc, DL_Error, ET, + "addition of unsigned offset to %0 overflowed to %1") << (void *)Base << (void *)Result; else - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "subtraction of unsigned offset from %0 overflowed to %1") << (void *)Base << (void *)Result; } else { - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "pointer index expression with base %0 overflowed to %1") << (void *)Base << (void *)Result; } @@ -630,7 +644,7 @@ void __ubsan::__ubsan_handle_pointer_overflow_abort(PointerOverflowData *Data, static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function, ReportOptions Opts) { - if (Data->CheckKind != CFITCK_ICall) + if (Data->CheckKind != CFITCK_ICall && Data->CheckKind != CFITCK_NVMFCall) Die(); SourceLocation Loc = Data->Loc.acquire(); @@ -641,15 +655,33 @@ static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function, ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during " - "indirect function call") - << Data->Type; + const char *CheckKindStr = Data->CheckKind == CFITCK_NVMFCall + ? "non-virtual pointer to member function call" + : "indirect function call"; + Diag(Loc, DL_Error, ET, + "control flow integrity check for type %0 failed during %1") + << Data->Type << CheckKindStr; SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); const char *FName = FLoc.get()->info.function; if (!FName) FName = "(unknown)"; - Diag(FLoc, DL_Note, "%0 defined here") << FName; + Diag(FLoc, DL_Note, ET, "%0 defined here") << FName; + + // If the failure involved different DSOs for the check location and icall + // target, report the DSO names. 
+ const char *DstModule = FLoc.get()->info.module; + if (!DstModule) + DstModule = "(unknown)"; + + const char *SrcModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Opts.pc); + if (!SrcModule) + SrcModule = "(unknown)"; + + if (internal_strcmp(SrcModule, DstModule)) + Diag(Loc, DL_Note, ET, + "check failed in %0, destination function located in %1") + << SrcModule << DstModule; } namespace __ubsan { @@ -685,7 +717,7 @@ void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(false); - if (Data->CheckKind == CFITCK_ICall) + if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall) handleCFIBadIcall(Data, Value, Opts); else __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts); @@ -695,7 +727,7 @@ void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data, ValueHandle Value, uptr ValidVtable) { GET_REPORT_OPTIONS(true); - if (Data->CheckKind == CFITCK_ICall) + if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall) handleCFIBadIcall(Data, Value, Opts); else __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts); diff --git a/lib/ubsan/ubsan_handlers.h b/lib/ubsan/ubsan_handlers.h index 311776b9f22c..ed3c8f0b1b0e 100644 --- a/lib/ubsan/ubsan_handlers.h +++ b/lib/ubsan/ubsan_handlers.h @@ -181,6 +181,8 @@ enum CFITypeCheckKind : unsigned char { CFITCK_DerivedCast, CFITCK_UnrelatedCast, CFITCK_ICall, + CFITCK_NVMFCall, + CFITCK_VMFCall, }; struct CFICheckFailData { diff --git a/lib/ubsan/ubsan_handlers_cxx.cc b/lib/ubsan/ubsan_handlers_cxx.cc index e15abc64ecca..85a3e8dad4c5 100644 --- a/lib/ubsan/ubsan_handlers_cxx.cc +++ b/lib/ubsan/ubsan_handlers_cxx.cc @@ -50,29 +50,30 @@ static bool HandleDynamicTypeCacheMiss( ScopedReport R(Opts, Loc, ET); - Diag(Loc, DL_Error, + Diag(Loc, DL_Error, ET, "%0 address %1 which does not point to an object of type %2") << TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type; // If possible, 
say what type it actually points to. if (!DTI.isValid()) { if (DTI.getOffset() < -VptrMaxOffsetToTop || DTI.getOffset() > VptrMaxOffsetToTop) { - Diag(Pointer, DL_Note, "object has a possibly invalid vptr: abs(offset to top) too big") + Diag(Pointer, DL_Note, ET, + "object has a possibly invalid vptr: abs(offset to top) too big") << TypeName(DTI.getMostDerivedTypeName()) << Range(Pointer, Pointer + sizeof(uptr), "possibly invalid vptr"); } else { - Diag(Pointer, DL_Note, "object has invalid vptr") + Diag(Pointer, DL_Note, ET, "object has invalid vptr") << TypeName(DTI.getMostDerivedTypeName()) << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr"); } } else if (!DTI.getOffset()) - Diag(Pointer, DL_Note, "object is of type %0") + Diag(Pointer, DL_Note, ET, "object is of type %0") << TypeName(DTI.getMostDerivedTypeName()) << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0"); else // FIXME: Find the type at the specified offset, and include that // in the note. - Diag(Pointer - DTI.getOffset(), DL_Note, + Diag(Pointer - DTI.getOffset(), DL_Note, ET, "object is base class subobject at offset %0 within object of type %1") << DTI.getOffset() << TypeName(DTI.getMostDerivedTypeName()) << TypeName(DTI.getSubobjectTypeName()) @@ -122,25 +123,39 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable, case CFITCK_UnrelatedCast: CheckKindStr = "cast to unrelated type"; break; + case CFITCK_VMFCall: + CheckKindStr = "virtual pointer to member function call"; + break; case CFITCK_ICall: + case CFITCK_NVMFCall: Die(); } - Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during " - "%1 (vtable address %2)") + Diag(Loc, DL_Error, ET, + "control flow integrity check for type %0 failed during " + "%1 (vtable address %2)") << Data->Type << CheckKindStr << (void *)Vtable; // If possible, say what type it actually points to. 
- if (!DTI.isValid()) { - const char *module = Symbolizer::GetOrInit()->GetModuleNameForPc(Vtable); - if (module) - Diag(Vtable, DL_Note, "invalid vtable in module %0") << module; - else - Diag(Vtable, DL_Note, "invalid vtable"); - } else { - Diag(Vtable, DL_Note, "vtable is of type %0") + if (!DTI.isValid()) + Diag(Vtable, DL_Note, ET, "invalid vtable"); + else + Diag(Vtable, DL_Note, ET, "vtable is of type %0") << TypeName(DTI.getMostDerivedTypeName()); - } + + // If the failure involved different DSOs for the check location and vtable, + // report the DSO names. + const char *DstModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Vtable); + if (!DstModule) + DstModule = "(unknown)"; + + const char *SrcModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Opts.pc); + if (!SrcModule) + SrcModule = "(unknown)"; + + if (internal_strcmp(SrcModule, DstModule)) + Diag(Loc, DL_Note, ET, "check failed in %0, vtable located in %1") + << SrcModule << DstModule; } } // namespace __ubsan diff --git a/lib/ubsan/ubsan_init_standalone_preinit.cc b/lib/ubsan/ubsan_init_standalone_preinit.cc new file mode 100644 index 000000000000..5e75c17ae79a --- /dev/null +++ b/lib/ubsan/ubsan_init_standalone_preinit.cc @@ -0,0 +1,36 @@ +//===-- ubsan_init_standalone_preinit.cc ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Initialization of standalone UBSan runtime. +// +//===----------------------------------------------------------------------===// + +#include "ubsan_platform.h" +#if !CAN_SANITIZE_UB +#error "UBSan is not supported on this platform!" 
+#endif + +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "ubsan_init.h" +#include "ubsan_signals_standalone.h" + +#if SANITIZER_CAN_USE_PREINIT_ARRAY + +namespace __ubsan { + +static void PreInitAsStandalone() { + InitAsStandalone(); + InitializeDeadlySignals(); +} + +} // namespace __ubsan + +__attribute__((section(".preinit_array"), used)) void (*__local_ubsan_preinit)( + void) = __ubsan::PreInitAsStandalone; +#endif // SANITIZER_CAN_USE_PREINIT_ARRAY diff --git a/lib/ubsan/ubsan_interface.inc b/lib/ubsan/ubsan_interface.inc index 9beb3e2ff953..782c621a2b47 100644 --- a/lib/ubsan/ubsan_interface.inc +++ b/lib/ubsan/ubsan_interface.inc @@ -52,3 +52,5 @@ INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1_abort) INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive) INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive_abort) INTERFACE_WEAK_FUNCTION(__ubsan_default_options) +INTERFACE_FUNCTION(__ubsan_on_report) +INTERFACE_FUNCTION(__ubsan_get_current_report_data) diff --git a/lib/ubsan/ubsan_monitor.cc b/lib/ubsan/ubsan_monitor.cc new file mode 100644 index 000000000000..e2b39845ce3c --- /dev/null +++ b/lib/ubsan/ubsan_monitor.cc @@ -0,0 +1,76 @@ +//===-- ubsan_monitor.cc ----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Hooks which allow a monitor process to inspect UBSan's diagnostics. 
+// +//===----------------------------------------------------------------------===// + +#include "ubsan_monitor.h" + +using namespace __ubsan; + +UndefinedBehaviorReport::UndefinedBehaviorReport(const char *IssueKind, + Location &Loc, + InternalScopedString &Msg) + : IssueKind(IssueKind), Loc(Loc), Buffer(Msg.length() + 1) { + // We have the common sanitizer reporting lock, so it's safe to register a + // new UB report. + RegisterUndefinedBehaviorReport(this); + + // Make a copy of the diagnostic. + Buffer.append("%s", Msg.data()); + + // Let the monitor know that a report is available. + __ubsan_on_report(); +} + +static UndefinedBehaviorReport *CurrentUBR; + +void __ubsan::RegisterUndefinedBehaviorReport(UndefinedBehaviorReport *UBR) { + CurrentUBR = UBR; +} + +SANITIZER_WEAK_DEFAULT_IMPL +void __ubsan::__ubsan_on_report(void) {} + +void __ubsan::__ubsan_get_current_report_data(const char **OutIssueKind, + const char **OutMessage, + const char **OutFilename, + unsigned *OutLine, + unsigned *OutCol, + char **OutMemoryAddr) { + if (!OutIssueKind || !OutMessage || !OutFilename || !OutLine || !OutCol || + !OutMemoryAddr) + UNREACHABLE("Invalid arguments passed to __ubsan_get_current_report_data"); + + InternalScopedString &Buf = CurrentUBR->Buffer; + + // Ensure that the first character of the diagnostic text can't start with a + // lowercase letter. 
+ char FirstChar = Buf.data()[0]; + if (FirstChar >= 'a' && FirstChar <= 'z') + Buf.data()[0] = FirstChar - 'a' + 'A'; + + *OutIssueKind = CurrentUBR->IssueKind; + *OutMessage = Buf.data(); + if (!CurrentUBR->Loc.isSourceLocation()) { + *OutFilename = "<unknown>"; + *OutLine = *OutCol = 0; + } else { + SourceLocation SL = CurrentUBR->Loc.getSourceLocation(); + *OutFilename = SL.getFilename(); + *OutLine = SL.getLine(); + *OutCol = SL.getColumn(); + } + + if (CurrentUBR->Loc.isMemoryLocation()) + *OutMemoryAddr = (char *)CurrentUBR->Loc.getMemoryLocation(); + else + *OutMemoryAddr = nullptr; +} diff --git a/lib/ubsan/ubsan_monitor.h b/lib/ubsan/ubsan_monitor.h new file mode 100644 index 000000000000..7159cbb2c587 --- /dev/null +++ b/lib/ubsan/ubsan_monitor.h @@ -0,0 +1,49 @@ +//===-- ubsan_monitor.h -----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Hooks which allow a monitor process to inspect UBSan's diagnostics. +// +//===----------------------------------------------------------------------===// + +#ifndef UBSAN_MONITOR_H +#define UBSAN_MONITOR_H + +#include "ubsan_diag.h" +#include "ubsan_value.h" + +namespace __ubsan { + +struct UndefinedBehaviorReport { + const char *IssueKind; + Location &Loc; + InternalScopedString Buffer; + + UndefinedBehaviorReport(const char *IssueKind, Location &Loc, + InternalScopedString &Msg); +}; + +SANITIZER_INTERFACE_ATTRIBUTE void +RegisterUndefinedBehaviorReport(UndefinedBehaviorReport *UBR); + +/// Called after a report is prepared. This serves to alert monitor processes +/// that a UB report is available. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_on_report(void); + +/// Used by the monitor process to extract information from a UB report. 
The +/// data is only available until the next time __ubsan_on_report is called. The +/// caller is responsible for copying and preserving the data if needed. +extern "C" SANITIZER_INTERFACE_ATTRIBUTE void +__ubsan_get_current_report_data(const char **OutIssueKind, + const char **OutMessage, + const char **OutFilename, unsigned *OutLine, + unsigned *OutCol, char **OutMemoryAddr); + +} // end namespace __ubsan + +#endif // UBSAN_MONITOR_H diff --git a/lib/ubsan/ubsan_platform.h b/lib/ubsan/ubsan_platform.h index 72eb4199960e..45a4aa772081 100644 --- a/lib/ubsan/ubsan_platform.h +++ b/lib/ubsan/ubsan_platform.h @@ -14,13 +14,10 @@ #define UBSAN_PLATFORM_H // Other platforms should be easy to add, and probably work as-is. -#if (defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \ - defined(__NetBSD__)) && \ - (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || \ - defined(__aarch64__) || defined(__mips__) || defined(__powerpc64__) || \ - defined(__s390__)) || (defined(__sun__) && defined(__svr4__)) -# define CAN_SANITIZE_UB 1 -#elif defined(_WIN32) || defined(__Fuchsia__) +#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \ + defined(__NetBSD__) || defined(__OpenBSD__) || \ + (defined(__sun__) && defined(__svr4__)) || \ + defined(_WIN32) || defined(__Fuchsia__) || defined(__rtems__) # define CAN_SANITIZE_UB 1 #else # define CAN_SANITIZE_UB 0 diff --git a/lib/ubsan/ubsan_signals_standalone.cc b/lib/ubsan/ubsan_signals_standalone.cc index 4c6646dd8b0b..5e77c60b1dbb 100644 --- a/lib/ubsan/ubsan_signals_standalone.cc +++ b/lib/ubsan/ubsan_signals_standalone.cc @@ -13,20 +13,34 @@ //===----------------------------------------------------------------------===// #include "ubsan_platform.h" +#include "sanitizer_common/sanitizer_platform.h" #if CAN_SANITIZE_UB #include "interception/interception.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "ubsan_diag.h" #include "ubsan_init.h" +// Interception of signals 
breaks too many things on Android. +// * It requires that ubsan is the first dependency of the main executable for +// the interceptors to work correctly. This complicates deployment, as it +// prevents us from enabling ubsan on random platform modules independently. +// * For this to work with ART VM, ubsan signal handler has to be set after the +// debuggerd handler, but before the ART handler. +// * Interceptors don't work at all when ubsan runtime is loaded late, ex. when +// it is part of an APK that does not use wrap.sh method. +#if SANITIZER_FUCHSIA || SANITIZER_ANDROID + +namespace __ubsan { +void InitializeDeadlySignals() {} +} + +#else + #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) #include "sanitizer_common/sanitizer_signal_interceptors.inc" namespace __ubsan { -#if SANITIZER_FUCHSIA -void InitializeDeadlySignals() {} -#else static void OnStackUnwind(const SignalContext &sig, const void *, BufferedStackTrace *stack) { GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context, @@ -46,8 +60,9 @@ void InitializeDeadlySignals() { InitializeSignalInterceptors(); InstallDeadlySignalHandlers(&UBsanOnDeadlySignal); } -#endif } // namespace __ubsan +#endif + #endif // CAN_SANITIZE_UB diff --git a/lib/ubsan/ubsan_win_weak_interception.cc b/lib/ubsan/ubsan_win_weak_interception.cc index 353719eefacf..c28577057c08 100644 --- a/lib/ubsan/ubsan_win_weak_interception.cc +++ b/lib/ubsan/ubsan_win_weak_interception.cc @@ -14,6 +14,7 @@ #ifdef SANITIZER_DYNAMIC #include "sanitizer_common/sanitizer_win_weak_interception.h" #include "ubsan_flags.h" +#include "ubsan_monitor.h" // Check if strong definitions for weak functions are present in the main // executable. If that is the case, override dll functions to point to strong // implementations. 
diff --git a/lib/ubsan_minimal/CMakeLists.txt b/lib/ubsan_minimal/CMakeLists.txt index 54860a3d2764..b70246845f84 100644 --- a/lib/ubsan_minimal/CMakeLists.txt +++ b/lib/ubsan_minimal/CMakeLists.txt @@ -44,7 +44,7 @@ if(COMPILER_RT_HAS_UBSAN_MINIMAL) LINK_LIBS ${UBSAN_DYNAMIC_LIBS} PARENT_TARGET ubsan-minimal) - if (UNIX AND NOT APPLE) + if (SANITIZER_USE_SYMBOLS AND NOT APPLE) set(ARCHS_FOR_SYMBOLS ${UBSAN_SUPPORTED_ARCH}) list(REMOVE_ITEM ARCHS_FOR_SYMBOLS i386 i686) add_sanitizer_rt_symbols(clang_rt.ubsan_minimal diff --git a/lib/ubsan_minimal/ubsan_minimal_handlers.cc b/lib/ubsan_minimal/ubsan_minimal_handlers.cc index 5a5675c983fe..f25a75a863c4 100644 --- a/lib/ubsan_minimal/ubsan_minimal_handlers.cc +++ b/lib/ubsan_minimal/ubsan_minimal_handlers.cc @@ -61,6 +61,19 @@ static void abort_with_message(const char *msg) { static void abort_with_message(const char *) { abort(); } #endif +#if SANITIZER_DEBUG +namespace __sanitizer { +// The DCHECK macro needs this symbol to be defined. +void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) { + message("Sanitizer CHECK failed: "); + message(file); + message(":?? : "); // FIXME: Show line number. + message(cond); + abort(); +} +} // namespace __sanitizer +#endif + #define INTERFACE extern "C" __attribute__((visibility("default"))) // FIXME: add caller pc to the error message (possibly as "ubsan: error-type diff --git a/lib/xray/CMakeLists.txt b/lib/xray/CMakeLists.txt index 5547600b943a..8e18f55658f8 100644 --- a/lib/xray/CMakeLists.txt +++ b/lib/xray/CMakeLists.txt @@ -1,16 +1,29 @@ -# Build for the XRay runtime support library. +# Build for all components of the XRay runtime support library. # XRay runtime library implementation files. 
set(XRAY_SOURCES - xray_inmemory_log.cc - xray_init.cc - xray_flags.cc - xray_interface.cc - xray_buffer_queue.cc - xray_log_interface.cc - xray_fdr_logging.cc - xray_utils.cc) + xray_init.cc + xray_flags.cc + xray_interface.cc + xray_log_interface.cc + xray_utils.cc) +# Implementation files for all XRay modes. +set(XRAY_FDR_MODE_SOURCES + xray_fdr_flags.cc + xray_buffer_queue.cc + xray_fdr_logging.cc) + +set(XRAY_BASIC_MODE_SOURCES + xray_basic_flags.cc + xray_basic_logging.cc) + +set(XRAY_PROFILING_MODE_SOURCES + xray_profile_collector.cc + xray_profiling.cc + xray_profiling_flags.cc) + +# Implementation files for all XRay architectures. set(x86_64_SOURCES xray_x86_64.cc xray_trampoline_x86_64.S) @@ -23,8 +36,8 @@ set(armhf_SOURCES ${arm_SOURCES}) set(aarch64_SOURCES - xray_AArch64.cc - xray_trampoline_AArch64.S) + xray_AArch64.cc + xray_trampoline_AArch64.S) set(mips_SOURCES xray_mips.cc @@ -47,11 +60,68 @@ set(powerpc64le_SOURCES xray_trampoline_powerpc64.cc xray_trampoline_powerpc64_asm.S) +set(XRAY_IMPL_HEADERS + xray_allocator.h + xray_basic_flags.h + xray_basic_flags.inc + xray_basic_logging.h + xray_buffer_queue.h + xray_defs.h + xray_fdr_flags.h + xray_fdr_flags.inc + xray_fdr_log_records.h + xray_fdr_logging.h + xray_flags.h + xray_flags.inc + xray_function_call_trie.h + xray_interface_internal.h + xray_powerpc64.inc + xray_profile_collector.h + xray_profiling_flags.h + xray_profiling_flags.inc + xray_recursion_guard.h + xray_segmented_array.h + xray_tsc.h + xray_utils.h + xray_x86_64.inc) + +# Create list of all source files for +# consumption by tests. 
+set(XRAY_ALL_SOURCE_FILES + ${XRAY_SOURCES} + ${XRAY_FDR_MODE_SOURCES} + ${XRAY_BASIC_MODE_SOURCES} + ${XRAY_PROFILING_MODE_SOURCES} + ${x86_64_SOURCES} + ${arm_SOURCES} + ${armhf_SOURCES} + ${mips_SOURCES} + ${mipsel_SOURCES} + ${mips64_SOURCES} + ${mips64el_SOURCES} + ${powerpc64le_SOURCES} + ${XRAY_IMPL_HEADERS} +) +list(REMOVE_DUPLICATES XRAY_ALL_SOURCE_FILES) +# Make list that uses absolute paths +set(XRAY_ALL_SOURCE_FILES_ABS_PATHS "") +foreach (src_file ${XRAY_ALL_SOURCE_FILES}) + list(APPEND + XRAY_ALL_SOURCE_FILES_ABS_PATHS + "${CMAKE_CURRENT_SOURCE_DIR}/${src_file}") +endforeach() + + +# Now put it all together... include_directories(..) include_directories(../../include) set(XRAY_CFLAGS ${SANITIZER_COMMON_CFLAGS}) set(XRAY_COMMON_DEFINITIONS XRAY_HAS_EXCEPTIONS=1) + +# We don't need RTTI in XRay, so turn that off. +append_rtti_flag(OFF XRAY_CFLAGS) + append_list_if( COMPILER_RT_HAS_XRAY_COMPILER_FLAG XRAY_SUPPORTED=1 XRAY_COMMON_DEFINITIONS) append_list_if( @@ -60,10 +130,13 @@ append_list_if( add_compiler_rt_component(xray) set(XRAY_COMMON_RUNTIME_OBJECT_LIBS - RTXray RTSanitizerCommon RTSanitizerCommonLibc) +if (TARGET cxx-headers OR HAVE_LIBCXX) + set(XRAY_DEPS cxx-headers) +endif() + if (APPLE) set(XRAY_LINK_LIBS ${SANITIZER_COMMON_LINK_LIBS}) add_asm_sources(XRAY_ASM_SOURCES xray_trampoline_x86_64.S) @@ -75,8 +148,34 @@ if (APPLE) OS ${XRAY_SUPPORTED_OS} ARCHS ${XRAY_SUPPORTED_ARCH} SOURCES ${x86_64_SOURCES} + ADDITIONAL_HEADERS ${XRAY_IMPL_HEADERS} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) + add_compiler_rt_object_libraries(RTXrayFDR + OS ${XRAY_SUPPORTED_OS} + ARCHS ${XRAY_SUPPORTED_ARCH} + SOURCES ${XRAY_FDR_MODE_SOURCES} + ADDITIONAL_HEADERS ${XRAY_IMPL_HEADERS} CFLAGS ${XRAY_CFLAGS} - DEFS ${XRAY_COMMON_DEFINITIONS}) + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) + add_compiler_rt_object_libraries(RTXrayBASIC + OS ${XRAY_SUPPORTED_OS} + ARCHS ${XRAY_SUPPORTED_ARCH} + SOURCES 
${XRAY_BASIC_MODE_SOURCES} + ADDITIONAL_HEADERS ${XRAY_IMPL_HEADERS} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) + add_compiler_rt_object_libraries(RTXrayPROFILING + OS ${XRAY_SUPPORTED_OS} + ARCHS ${XRAY_SUPPORTED_ARCH} + SOURCES ${XRAY_PROFILING_MODE_SOURCES} + ADDITIONAL_HEADERS ${XRAY_IMPL_HEADERS} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) # We only support running on osx for now. add_compiler_rt_runtime(clang_rt.xray @@ -91,24 +190,104 @@ if (APPLE) LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} LINK_LIBS ${XRAY_LINK_LIBS} PARENT_TARGET xray) -else() -foreach(arch ${XRAY_SUPPORTED_ARCH}) - if(CAN_TARGET_${arch}) + add_compiler_rt_runtime(clang_rt.xray-fdr + STATIC + OS ${XRAY_SUPPORTED_OS} + ARCHS ${XRAY_SUPPORTED_ARCH} + OBJECT_LIBS RTXrayFDR + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} + LINK_LIBS ${XRAY_LINK_LIBS} + PARENT_TARGET xray) + add_compiler_rt_runtime(clang_rt.xray-basic + STATIC + OS ${XRAY_SUPPORTED_OS} + ARCHS ${XRAY_SUPPORTED_ARCH} + OBJECT_LIBS RTXrayBASIC + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} + LINK_LIBS ${XRAY_LINK_LIBS} + PARENT_TARGET xray) + add_compiler_rt_runtime(clang_rt.xray-profiling + STATIC + OS ${XRAY_SUPPORTED_OS} + ARCHS ${XRAY_SUPPORTED_ARCH} + OBJECT_LIBS RTXrayPROFILING + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS} ${WEAK_SYMBOL_LINK_FLAGS} + LINK_LIBS ${XRAY_LINK_LIBS} + PARENT_TARGET xray) +else() # not Apple + foreach(arch ${XRAY_SUPPORTED_ARCH}) + if(NOT CAN_TARGET_${arch}) + continue() + endif() add_compiler_rt_object_libraries(RTXray ARCHS ${arch} - SOURCES ${XRAY_SOURCES} CFLAGS ${XRAY_CFLAGS} - DEFS ${XRAY_COMMON_DEFINITIONS}) + SOURCES ${XRAY_SOURCES} ${${arch}_SOURCES} + ADDITIONAL_HEADERS 
${XRAY_IMPL_HEADERS} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) + add_compiler_rt_object_libraries(RTXrayFDR + ARCHS ${arch} + SOURCES ${XRAY_FDR_MODE_SOURCES} + ADDITIONAL_HEADERS ${XRAY_IMPL_HEADERS} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) + add_compiler_rt_object_libraries(RTXrayBASIC + ARCHS ${arch} + SOURCES ${XRAY_BASIC_MODE_SOURCES} + ADDITIONAL_HEADERS ${XRAY_IMPL_HEADERS} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) + add_compiler_rt_object_libraries(RTXrayPROFILING + ARCHS ${arch} + SOURCES ${XRAY_PROFILING_MODE_SOURCES} + ADDITIONAL_HEADERS ${XRAY_IMPL_HEADERS} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + DEPS ${XRAY_DEPS}) + + # Common XRay archive for instrumented binaries. add_compiler_rt_runtime(clang_rt.xray STATIC ARCHS ${arch} - SOURCES ${${arch}_SOURCES} CFLAGS ${XRAY_CFLAGS} DEFS ${XRAY_COMMON_DEFINITIONS} - OBJECT_LIBS ${XRAY_COMMON_RUNTIME_OBJECT_LIBS} + OBJECT_LIBS ${XRAY_COMMON_RUNTIME_OBJECT_LIBS} RTXray PARENT_TARGET xray) - endif() -endforeach() -endif() + # FDR mode runtime archive (addon for clang_rt.xray) + add_compiler_rt_runtime(clang_rt.xray-fdr + STATIC + ARCHS ${arch} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + OBJECT_LIBS RTXrayFDR + PARENT_TARGET xray) + # Basic mode runtime archive (addon for clang_rt.xray) + add_compiler_rt_runtime(clang_rt.xray-basic + STATIC + ARCHS ${arch} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + OBJECT_LIBS RTXrayBASIC + PARENT_TARGET xray) + # Profiler Mode runtime + add_compiler_rt_runtime(clang_rt.xray-profiling + STATIC + ARCHS ${arch} + CFLAGS ${XRAY_CFLAGS} + DEFS ${XRAY_COMMON_DEFINITIONS} + OBJECT_LIBS RTXrayPROFILING + PARENT_TARGET xray) + endforeach() +endif() # not Apple if(COMPILER_RT_INCLUDE_TESTS) add_subdirectory(tests) diff --git a/lib/xray/tests/CMakeLists.txt b/lib/xray/tests/CMakeLists.txt index e54e63f27890..11f373167d24 100644 
--- a/lib/xray/tests/CMakeLists.txt +++ b/lib/xray/tests/CMakeLists.txt @@ -3,6 +3,18 @@ include_directories(..) add_custom_target(XRayUnitTests) set_target_properties(XRayUnitTests PROPERTIES FOLDER "XRay unittests") +# Sanity check XRAY_ALL_SOURCE_FILES_ABS_PATHS +list(LENGTH XRAY_ALL_SOURCE_FILES_ABS_PATHS XASFAP_LENGTH) +if (${XASFAP_LENGTH} EQUAL 0) + message(FATAL_ERROR "XRAY_ALL_SOURCE_FILES_ABS_PATHS cannot be empty") +endif() +unset(XASFAP_LENGTH) +foreach (src_file ${XRAY_ALL_SOURCE_FILES_ABS_PATHS}) + if (NOT EXISTS "${src_file}") + message(FATAL_ERROR "Source file \"${src_file}\" does not exist") + endif() +endforeach() + set(XRAY_UNITTEST_CFLAGS ${XRAY_CFLAGS} ${COMPILER_RT_UNITTEST_CFLAGS} @@ -11,27 +23,77 @@ set(XRAY_UNITTEST_CFLAGS -I${COMPILER_RT_SOURCE_DIR}/lib/xray -I${COMPILER_RT_SOURCE_DIR}/lib) +function(add_xray_lib library) + add_library(${library} STATIC ${ARGN}) + set_target_properties(${library} PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + FOLDER "Compiler-RT Runtime tests") +endfunction() + +function(get_xray_lib_for_arch arch lib) + if(APPLE) + set(tgt_name "RTXRay.test.osx") + else() + set(tgt_name "RTXRay.test.${arch}") + endif() + set(${lib} "${tgt_name}" PARENT_SCOPE) +endfunction() + set(XRAY_TEST_ARCH ${XRAY_SUPPORTED_ARCH}) +set(XRAY_UNITTEST_LINK_FLAGS + ${CMAKE_THREAD_LIBS_INIT} + -l${SANITIZER_CXX_ABI_LIBRARY} + -fxray-instrument + ) +if (NOT APPLE) + append_list_if(COMPILER_RT_HAS_LIBM -lm XRAY_UNITTEST_LINK_FLAGS) + append_list_if(COMPILER_RT_HAS_LIBRT -lrt XRAY_UNITTEST_LINK_FLAGS) + append_list_if(COMPILER_RT_HAS_LIBDL -ldl XRAY_UNITTEST_LINK_FLAGS) + append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread XRAY_UNITTEST_LINK_FLAGS) +endif() + macro(add_xray_unittest testname) cmake_parse_arguments(TEST "" "" "SOURCES;HEADERS" ${ARGN}) if(UNIX AND NOT APPLE) + set(CMAKE_DL_LIBS_INIT "") foreach(arch ${XRAY_TEST_ARCH}) set(TEST_OBJECTS) + get_xray_lib_for_arch(${arch} XRAY_RUNTIME_LIBS) 
generate_compiler_rt_tests(TEST_OBJECTS XRayUnitTests "${testname}-${arch}-Test" "${arch}" SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE} + # Note that any change in the implementations will cause all the unit + # tests to be re-built. This is by design, but may be cumbersome during + # the build/test cycle. + COMPILE_DEPS ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE} + ${XRAY_HEADERS} ${XRAY_ALL_SOURCE_FILES_ABS_PATHS} + RUNTIME "${XRAY_RUNTIME_LIBS}" DEPS gtest xray llvm-xray CFLAGS ${XRAY_UNITTEST_CFLAGS} - LINK_FLAGS -fxray-instrument - ${TARGET_LINK_FLAGS} - -lstdc++ -lm ${CMAKE_THREAD_LIBS_INIT} - -lpthread - -ldl -lrt) - set_target_properties(XRayUnitTests PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + LINK_FLAGS ${TARGET_LINK_FLAGS} ${XRAY_UNITTEST_LINK_FLAGS}) + set_target_properties(XRayUnitTests + PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endforeach() endif() endmacro() if(COMPILER_RT_CAN_EXECUTE_TESTS) + if (APPLE) + add_xray_lib("RTXRay.test.osx" + $<TARGET_OBJECTS:RTXray.osx> + $<TARGET_OBJECTS:RTXrayFDR.osx> + $<TARGET_OBJECTS:RTXrayPROFILING.osx> + $<TARGET_OBJECTS:RTSanitizerCommon.osx> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx>) + else() + foreach(arch ${XRAY_SUPPORTED_ARCH}) + add_xray_lib("RTXRay.test.${arch}" + $<TARGET_OBJECTS:RTXray.${arch}> + $<TARGET_OBJECTS:RTXrayFDR.${arch}> + $<TARGET_OBJECTS:RTXrayPROFILING.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommon.${arch}> + $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>) + endforeach() + endif() add_subdirectory(unit) endif() diff --git a/lib/xray/tests/unit/CMakeLists.txt b/lib/xray/tests/unit/CMakeLists.txt index 62d01f239581..b42eb50d0790 100644 --- a/lib/xray/tests/unit/CMakeLists.txt +++ b/lib/xray/tests/unit/CMakeLists.txt @@ -2,3 +2,11 @@ add_xray_unittest(XRayBufferQueueTest SOURCES buffer_queue_test.cc xray_unit_test_main.cc) add_xray_unittest(XRayFDRLoggingTest SOURCES fdr_logging_test.cc xray_unit_test_main.cc) 
+add_xray_unittest(XRayAllocatorTest SOURCES + allocator_test.cc xray_unit_test_main.cc) +add_xray_unittest(XRaySegmentedArrayTest SOURCES + segmented_array_test.cc xray_unit_test_main.cc) +add_xray_unittest(XRayFunctionCallTrieTest SOURCES + function_call_trie_test.cc xray_unit_test_main.cc) +add_xray_unittest(XRayProfileCollectorTest SOURCES + profile_collector_test.cc xray_unit_test_main.cc) diff --git a/lib/xray/tests/unit/allocator_test.cc b/lib/xray/tests/unit/allocator_test.cc new file mode 100644 index 000000000000..be404160e417 --- /dev/null +++ b/lib/xray/tests/unit/allocator_test.cc @@ -0,0 +1,42 @@ +//===-- allocator_test.cc -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a function call tracing system. 
+// +//===----------------------------------------------------------------------===// + +#include "xray_allocator.h" +#include "gtest/gtest.h" + +namespace __xray { +namespace { + +struct TestData { + s64 First; + s64 Second; +}; + +TEST(AllocatorTest, Construction) { Allocator<sizeof(TestData)> A(2 << 11); } + +TEST(AllocatorTest, Allocate) { + Allocator<sizeof(TestData)> A(2 << 11); + auto B = A.Allocate(); + ASSERT_NE(B.Data, nullptr); +} + +TEST(AllocatorTest, OverAllocate) { + Allocator<sizeof(TestData)> A(sizeof(TestData)); + auto B1 = A.Allocate(); + (void)B1; + auto B2 = A.Allocate(); + ASSERT_EQ(B2.Data, nullptr); +} + +} // namespace +} // namespace __xray diff --git a/lib/xray/tests/unit/buffer_queue_test.cc b/lib/xray/tests/unit/buffer_queue_test.cc index 1ec7469ce187..c0d4ccb268d6 100644 --- a/lib/xray/tests/unit/buffer_queue_test.cc +++ b/lib/xray/tests/unit/buffer_queue_test.cc @@ -32,9 +32,9 @@ TEST(BufferQueueTest, GetAndRelease) { ASSERT_TRUE(Success); BufferQueue::Buffer Buf; ASSERT_EQ(Buffers.getBuffer(Buf), BufferQueue::ErrorCode::Ok); - ASSERT_NE(nullptr, Buf.Buffer); + ASSERT_NE(nullptr, Buf.Data); ASSERT_EQ(Buffers.releaseBuffer(Buf), BufferQueue::ErrorCode::Ok); - ASSERT_EQ(nullptr, Buf.Buffer); + ASSERT_EQ(nullptr, Buf.Data); } TEST(BufferQueueTest, GetUntilFailed) { @@ -53,7 +53,7 @@ TEST(BufferQueueTest, ReleaseUnknown) { BufferQueue Buffers(kSize, 1, Success); ASSERT_TRUE(Success); BufferQueue::Buffer Buf; - Buf.Buffer = reinterpret_cast<void *>(0xdeadbeef); + Buf.Data = reinterpret_cast<void *>(0xdeadbeef); Buf.Size = kSize; EXPECT_EQ(BufferQueue::ErrorCode::UnrecognizedBuffer, Buffers.releaseBuffer(Buf)); @@ -65,7 +65,7 @@ TEST(BufferQueueTest, ErrorsWhenFinalising) { ASSERT_TRUE(Success); BufferQueue::Buffer Buf; ASSERT_EQ(Buffers.getBuffer(Buf), BufferQueue::ErrorCode::Ok); - ASSERT_NE(nullptr, Buf.Buffer); + ASSERT_NE(nullptr, Buf.Data); ASSERT_EQ(Buffers.finalize(), BufferQueue::ErrorCode::Ok); BufferQueue::Buffer OtherBuf; 
ASSERT_EQ(BufferQueue::ErrorCode::QueueFinalizing, diff --git a/lib/xray/tests/unit/fdr_logging_test.cc b/lib/xray/tests/unit/fdr_logging_test.cc index 76738ea4dff3..b6961efbc351 100644 --- a/lib/xray/tests/unit/fdr_logging_test.cc +++ b/lib/xray/tests/unit/fdr_logging_test.cc @@ -10,6 +10,7 @@ // This file is a part of XRay, a function call tracing system. // //===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_common.h" #include "xray_fdr_logging.h" #include "gtest/gtest.h" @@ -86,7 +87,7 @@ TEST(FDRLoggingTest, Simple) { XRayFileHeader H; memcpy(&H, Contents, sizeof(XRayFileHeader)); - ASSERT_EQ(H.Version, 2); + ASSERT_EQ(H.Version, 3); ASSERT_EQ(H.Type, FileTypes::FDR_LOG); // We require one buffer at least to have the "extents" metadata record, @@ -131,7 +132,7 @@ TEST(FDRLoggingTest, Multiple) { XRayFileHeader H; memcpy(&H, Contents, sizeof(XRayFileHeader)); - ASSERT_EQ(H.Version, 2); + ASSERT_EQ(H.Version, 3); ASSERT_EQ(H.Type, FileTypes::FDR_LOG); MetadataRecord MDR0, MDR1; @@ -154,12 +155,12 @@ TEST(FDRLoggingTest, MultiThreadedCycling) { // Now we want to create one thread, do some logging, then create another one, // in succession and making sure that we're able to get thread records from // the latest thread (effectively being able to recycle buffers). 
- std::array<pid_t, 2> Threads; + std::array<tid_t, 2> Threads; for (uint64_t I = 0; I < 2; ++I) { std::thread t{[I, &Threads] { fdrLoggingHandleArg0(I + 1, XRayEntryType::ENTRY); fdrLoggingHandleArg0(I + 1, XRayEntryType::EXIT); - Threads[I] = syscall(SYS_gettid); + Threads[I] = GetTid(); }}; t.join(); } @@ -182,7 +183,7 @@ TEST(FDRLoggingTest, MultiThreadedCycling) { XRayFileHeader H; memcpy(&H, Contents, sizeof(XRayFileHeader)); - ASSERT_EQ(H.Version, 2); + ASSERT_EQ(H.Version, 3); ASSERT_EQ(H.Type, FileTypes::FDR_LOG); MetadataRecord MDR0, MDR1; @@ -192,9 +193,9 @@ TEST(FDRLoggingTest, MultiThreadedCycling) { ASSERT_EQ(MDR0.RecordKind, uint8_t(MetadataRecord::RecordKinds::BufferExtents)); ASSERT_EQ(MDR1.RecordKind, uint8_t(MetadataRecord::RecordKinds::NewBuffer)); - pid_t Latest = 0; - memcpy(&Latest, MDR1.Data, sizeof(pid_t)); - ASSERT_EQ(Latest, Threads[1]); + int32_t Latest = 0; + memcpy(&Latest, MDR1.Data, sizeof(int32_t)); + ASSERT_EQ(Latest, static_cast<int32_t>(Threads[1])); } } // namespace diff --git a/lib/xray/tests/unit/function_call_trie_test.cc b/lib/xray/tests/unit/function_call_trie_test.cc new file mode 100644 index 000000000000..049ecfb07e01 --- /dev/null +++ b/lib/xray/tests/unit/function_call_trie_test.cc @@ -0,0 +1,286 @@ +//===-- function_call_trie_test.cc ----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a function call tracing system. 
+// +//===----------------------------------------------------------------------===// +#include "gtest/gtest.h" + +#include "xray_function_call_trie.h" + +namespace __xray { + +namespace { + +TEST(FunctionCallTrieTest, ConstructWithTLSAllocators) { + profilingFlags()->setDefaults(); + FunctionCallTrie::Allocators Allocators = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(Allocators); +} + +TEST(FunctionCallTrieTest, EnterAndExitFunction) { + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + + Trie.enterFunction(1, 1); + Trie.exitFunction(1, 2); + + // We need a way to pull the data out. At this point, until we get a data + // collection service implemented, we're going to export the data as a list of + // roots, and manually walk through the structure ourselves. + + const auto &R = Trie.getRoots(); + + ASSERT_EQ(R.size(), 1u); + ASSERT_EQ(R.front()->FId, 1); + ASSERT_EQ(R.front()->CallCount, 1); + ASSERT_EQ(R.front()->CumulativeLocalTime, 1u); +} + +TEST(FunctionCallTrieTest, MissingFunctionEntry) { + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + Trie.exitFunction(1, 1); + const auto &R = Trie.getRoots(); + + ASSERT_TRUE(R.empty()); +} + +TEST(FunctionCallTrieTest, NoMatchingEntersForExit) { + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + Trie.enterFunction(2, 1); + Trie.enterFunction(3, 3); + Trie.exitFunction(1, 5); + const auto &R = Trie.getRoots(); + + ASSERT_FALSE(R.empty()); + EXPECT_EQ(R.size(), size_t{1}); +} + +TEST(FunctionCallTrieTest, MissingFunctionExit) { + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + Trie.enterFunction(1, 1); + const auto &R = Trie.getRoots(); + + ASSERT_FALSE(R.empty()); + EXPECT_EQ(R.size(), size_t{1}); +} + +TEST(FunctionCallTrieTest, MultipleRoots) { + 
profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + + // Enter and exit FId = 1. + Trie.enterFunction(1, 1); + Trie.exitFunction(1, 2); + + // Enter and exit FId = 2. + Trie.enterFunction(2, 3); + Trie.exitFunction(2, 4); + + const auto &R = Trie.getRoots(); + ASSERT_FALSE(R.empty()); + ASSERT_EQ(R.size(), 2u); + + // Make sure the roots have different IDs. + const auto R0 = R[0]; + const auto R1 = R[1]; + ASSERT_NE(R0->FId, R1->FId); + + // Inspect the roots that they have the right data. + ASSERT_NE(R0, nullptr); + EXPECT_EQ(R0->CallCount, 1u); + EXPECT_EQ(R0->CumulativeLocalTime, 1u); + + ASSERT_NE(R1, nullptr); + EXPECT_EQ(R1->CallCount, 1u); + EXPECT_EQ(R1->CumulativeLocalTime, 1u); +} + +// While missing an intermediary entry may be rare in practice, we still enforce +// that we can handle the case where we've missed the entry event somehow, in +// between call entry/exits. To illustrate, imagine the following shadow call +// stack: +// +// f0@t0 -> f1@t1 -> f2@t2 +// +// If for whatever reason we see an exit for `f2` @ t3, followed by an exit for +// `f0` @ t4 (i.e. no `f1` exit in between) then we need to handle the case of +// accounting local time to `f2` from d = (t3 - t2), then local time to `f1` +// as d' = (t3 - t1) - d, and then local time to `f0` as d'' = (t3 - t0) - d'. +TEST(FunctionCallTrieTest, MissingIntermediaryExit) { + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + + Trie.enterFunction(1, 0); + Trie.enterFunction(2, 100); + Trie.enterFunction(3, 200); + Trie.exitFunction(3, 300); + Trie.exitFunction(1, 400); + + // What we should see at this point is all the functions in the trie in a + // specific order (1 -> 2 -> 3) with the appropriate count(s) and local + // latencies. 
+ const auto &R = Trie.getRoots(); + ASSERT_FALSE(R.empty()); + ASSERT_EQ(R.size(), 1u); + + const auto &F1 = *R[0]; + ASSERT_EQ(F1.FId, 1); + ASSERT_FALSE(F1.Callees.empty()); + + const auto &F2 = *F1.Callees[0].NodePtr; + ASSERT_EQ(F2.FId, 2); + ASSERT_FALSE(F2.Callees.empty()); + + const auto &F3 = *F2.Callees[0].NodePtr; + ASSERT_EQ(F3.FId, 3); + ASSERT_TRUE(F3.Callees.empty()); + + // Now that we've established the preconditions, we check for specific aspects + // of the nodes. + EXPECT_EQ(F3.CallCount, 1); + EXPECT_EQ(F2.CallCount, 1); + EXPECT_EQ(F1.CallCount, 1); + EXPECT_EQ(F3.CumulativeLocalTime, 100); + EXPECT_EQ(F2.CumulativeLocalTime, 300); + EXPECT_EQ(F1.CumulativeLocalTime, 100); +} + +TEST(FunctionCallTrieTest, DeepCallStack) { + // Simulate a relatively deep call stack (32 levels) and ensure that we can + // properly pop all the way up the stack. + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + for (int i = 0; i < 32; ++i) + Trie.enterFunction(i + 1, i); + Trie.exitFunction(1, 33); + + // Here, validate that we have a 32-level deep function call path from the + // root (1) down to the leaf (33). + const auto &R = Trie.getRoots(); + ASSERT_EQ(R.size(), 1u); + auto F = R[0]; + for (int i = 0; i < 32; ++i) { + EXPECT_EQ(F->FId, i + 1); + EXPECT_EQ(F->CallCount, 1); + if (F->Callees.empty() && i != 31) + FAIL() << "Empty callees for FId " << F->FId; + if (i != 31) + F = F->Callees[0].NodePtr; + } +} + +// TODO: Test that we can handle cross-CPU migrations, where TSCs are not +// guaranteed to be synchronised. +TEST(FunctionCallTrieTest, DeepCopy) { + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Trie(A); + + Trie.enterFunction(1, 0); + Trie.enterFunction(2, 1); + Trie.exitFunction(2, 2); + Trie.enterFunction(3, 3); + Trie.exitFunction(3, 4); + Trie.exitFunction(1, 5); + + // We want to make a deep copy and compare notes. 
+ auto B = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Copy(B); + Trie.deepCopyInto(Copy); + + ASSERT_NE(Trie.getRoots().size(), 0u); + ASSERT_EQ(Trie.getRoots().size(), Copy.getRoots().size()); + const auto &R0Orig = *Trie.getRoots()[0]; + const auto &R0Copy = *Copy.getRoots()[0]; + EXPECT_EQ(R0Orig.FId, 1); + EXPECT_EQ(R0Orig.FId, R0Copy.FId); + + ASSERT_EQ(R0Orig.Callees.size(), 2u); + ASSERT_EQ(R0Copy.Callees.size(), 2u); + + const auto &F1Orig = + *R0Orig.Callees + .find_element( + [](const FunctionCallTrie::NodeIdPair &R) { return R.FId == 2; }) + ->NodePtr; + const auto &F1Copy = + *R0Copy.Callees + .find_element( + [](const FunctionCallTrie::NodeIdPair &R) { return R.FId == 2; }) + ->NodePtr; + EXPECT_EQ(&R0Orig, F1Orig.Parent); + EXPECT_EQ(&R0Copy, F1Copy.Parent); +} + +TEST(FunctionCallTrieTest, MergeInto) { + profilingFlags()->setDefaults(); + auto A = FunctionCallTrie::InitAllocators(); + FunctionCallTrie T0(A); + FunctionCallTrie T1(A); + + // 1 -> 2 -> 3 + T0.enterFunction(1, 0); + T0.enterFunction(2, 1); + T0.enterFunction(3, 2); + T0.exitFunction(3, 3); + T0.exitFunction(2, 4); + T0.exitFunction(1, 5); + + // 1 -> 2 -> 3 + T1.enterFunction(1, 0); + T1.enterFunction(2, 1); + T1.enterFunction(3, 2); + T1.exitFunction(3, 3); + T1.exitFunction(2, 4); + T1.exitFunction(1, 5); + + // We use a different allocator here to make sure that we're able to transfer + // data into a FunctionCallTrie which uses a different allocator. This + // reflects the inteded usage scenario for when we're collecting profiles that + // aggregate across threads. 
+ auto B = FunctionCallTrie::InitAllocators(); + FunctionCallTrie Merged(B); + + T0.mergeInto(Merged); + T1.mergeInto(Merged); + + ASSERT_EQ(Merged.getRoots().size(), 1u); + const auto &R0 = *Merged.getRoots()[0]; + EXPECT_EQ(R0.FId, 1); + EXPECT_EQ(R0.CallCount, 2); + EXPECT_EQ(R0.CumulativeLocalTime, 10); + EXPECT_EQ(R0.Callees.size(), 1u); + + const auto &F1 = *R0.Callees[0].NodePtr; + EXPECT_EQ(F1.FId, 2); + EXPECT_EQ(F1.CallCount, 2); + EXPECT_EQ(F1.CumulativeLocalTime, 6); + EXPECT_EQ(F1.Callees.size(), 1u); + + const auto &F2 = *F1.Callees[0].NodePtr; + EXPECT_EQ(F2.FId, 3); + EXPECT_EQ(F2.CallCount, 2); + EXPECT_EQ(F2.CumulativeLocalTime, 2); + EXPECT_EQ(F2.Callees.size(), 0u); +} + +} // namespace + +} // namespace __xray diff --git a/lib/xray/tests/unit/profile_collector_test.cc b/lib/xray/tests/unit/profile_collector_test.cc new file mode 100644 index 000000000000..b7dbe567312a --- /dev/null +++ b/lib/xray/tests/unit/profile_collector_test.cc @@ -0,0 +1,179 @@ +//===-- profile_collector_test.cc -----------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a function call tracing system. 
+// +//===----------------------------------------------------------------------===// +#include "gtest/gtest.h" + +#include "xray_profile_collector.h" +#include "xray_profiling_flags.h" +#include <cstdint> +#include <thread> +#include <utility> +#include <vector> + +namespace __xray { +namespace { + +static constexpr auto kHeaderSize = 16u; + +void ValidateBlock(XRayBuffer B) { + profilingFlags()->setDefaults(); + ASSERT_NE(static_cast<const void *>(B.Data), nullptr); + ASSERT_NE(B.Size, 0u); + ASSERT_GE(B.Size, kHeaderSize); + // We look at the block size, the block number, and the thread ID to ensure + // that none of them are zero (or that the header data is laid out as we + // expect). + char LocalBuffer[kHeaderSize] = {}; + internal_memcpy(LocalBuffer, B.Data, kHeaderSize); + u32 BlockSize = 0; + u32 BlockNumber = 0; + u64 ThreadId = 0; + internal_memcpy(&BlockSize, LocalBuffer, sizeof(u32)); + internal_memcpy(&BlockNumber, LocalBuffer + sizeof(u32), sizeof(u32)); + internal_memcpy(&ThreadId, LocalBuffer + (2 * sizeof(u32)), sizeof(u64)); + ASSERT_NE(BlockSize, 0u); + ASSERT_GE(BlockNumber, 0u); + ASSERT_NE(ThreadId, 0u); +} + +std::tuple<u32, u32, u64> ParseBlockHeader(XRayBuffer B) { + char LocalBuffer[kHeaderSize] = {}; + internal_memcpy(LocalBuffer, B.Data, kHeaderSize); + u32 BlockSize = 0; + u32 BlockNumber = 0; + u64 ThreadId = 0; + internal_memcpy(&BlockSize, LocalBuffer, sizeof(u32)); + internal_memcpy(&BlockNumber, LocalBuffer + sizeof(u32), sizeof(u32)); + internal_memcpy(&ThreadId, LocalBuffer + (2 * sizeof(u32)), sizeof(u64)); + return std::make_tuple(BlockSize, BlockNumber, ThreadId); +} + +struct Profile { + int64_t CallCount; + int64_t CumulativeLocalTime; + std::vector<int32_t> Path; +}; + +std::tuple<Profile, const char *> ParseProfile(const char *P) { + Profile Result; + // Read the path first, until we find a sentinel 0. 
+ int32_t F; + do { + internal_memcpy(&F, P, sizeof(int32_t)); + P += sizeof(int32_t); + Result.Path.push_back(F); + } while (F != 0); + + // Then read the CallCount. + internal_memcpy(&Result.CallCount, P, sizeof(int64_t)); + P += sizeof(int64_t); + + // Then read the CumulativeLocalTime. + internal_memcpy(&Result.CumulativeLocalTime, P, sizeof(int64_t)); + P += sizeof(int64_t); + return std::make_tuple(std::move(Result), P); +} + +TEST(profileCollectorServiceTest, PostSerializeCollect) { + profilingFlags()->setDefaults(); + // The most basic use-case (the one we actually only care about) is the one + // where we ensure that we can post FunctionCallTrie instances, which are then + // destroyed but serialized properly. + // + // First, we initialise a set of allocators in the local scope. This ensures + // that we're able to copy the contents of the FunctionCallTrie that uses + // the local allocators. + auto Allocators = FunctionCallTrie::InitAllocators(); + FunctionCallTrie T(Allocators); + + // Then, we populate the trie with some data. + T.enterFunction(1, 1); + T.enterFunction(2, 2); + T.exitFunction(2, 3); + T.exitFunction(1, 4); + + // Then we post the data to the global profile collector service. + profileCollectorService::post(T, 1); + + // Then we serialize the data. + profileCollectorService::serialize(); + + // Then we go through a single buffer to see whether we're getting the data we + // expect. + auto B = profileCollectorService::nextBuffer({nullptr, 0}); + ValidateBlock(B); + u32 BlockSize; + u32 BlockNum; + u64 ThreadId; + std::tie(BlockSize, BlockNum, ThreadId) = ParseBlockHeader(B); + + // We look at the serialized buffer to see whether the Trie we're expecting + // to see is there. 
+ auto DStart = static_cast<const char *>(B.Data) + kHeaderSize; + std::vector<char> D(DStart, DStart + BlockSize); + B = profileCollectorService::nextBuffer(B); + ASSERT_EQ(B.Data, nullptr); + ASSERT_EQ(B.Size, 0u); + + Profile Profile1, Profile2; + auto P = static_cast<const char *>(D.data()); + std::tie(Profile1, P) = ParseProfile(P); + std::tie(Profile2, P) = ParseProfile(P); + + ASSERT_NE(Profile1.Path.size(), Profile2.Path.size()); + auto &P1 = Profile1.Path.size() < Profile2.Path.size() ? Profile2 : Profile1; + auto &P2 = Profile1.Path.size() < Profile2.Path.size() ? Profile1 : Profile2; + std::vector<int32_t> P1Expected = {2, 1, 0}; + std::vector<int32_t> P2Expected = {1, 0}; + ASSERT_EQ(P1.Path.size(), P1Expected.size()); + ASSERT_EQ(P2.Path.size(), P2Expected.size()); + ASSERT_EQ(P1.Path, P1Expected); + ASSERT_EQ(P2.Path, P2Expected); +} + +// We break out a function that will be run in multiple threads, one that will +// use a thread local allocator, and will post the FunctionCallTrie to the +// profileCollectorService. This simulates what the threads being profiled would +// be doing anyway, but through the XRay logging implementation. +void threadProcessing() { + thread_local auto Allocators = FunctionCallTrie::InitAllocators(); + FunctionCallTrie T(Allocators); + + T.enterFunction(1, 1); + T.enterFunction(2, 2); + T.exitFunction(2, 3); + T.exitFunction(1, 4); + + profileCollectorService::post(T, GetTid()); +} + +TEST(profileCollectorServiceTest, PostSerializeCollectMultipleThread) { + profilingFlags()->setDefaults(); + std::thread t1(threadProcessing); + std::thread t2(threadProcessing); + + t1.join(); + t2.join(); + + // At this point, t1 and t2 are already done with what they were doing. + profileCollectorService::serialize(); + + // Ensure that we see two buffers. 
+ auto B = profileCollectorService::nextBuffer({nullptr, 0}); + ValidateBlock(B); + + B = profileCollectorService::nextBuffer(B); + ValidateBlock(B); +} + +} // namespace +} // namespace __xray diff --git a/lib/xray/tests/unit/segmented_array_test.cc b/lib/xray/tests/unit/segmented_array_test.cc new file mode 100644 index 000000000000..035674ccfaf5 --- /dev/null +++ b/lib/xray/tests/unit/segmented_array_test.cc @@ -0,0 +1,200 @@ +#include "xray_segmented_array.h" +#include "gtest/gtest.h" + +namespace __xray { +namespace { + +struct TestData { + s64 First; + s64 Second; + + // Need a constructor for emplace operations. + TestData(s64 F, s64 S) : First(F), Second(S) {} +}; + +TEST(SegmentedArrayTest, ConstructWithAllocators) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + Array<TestData> Data(A); + (void)Data; +} + +TEST(SegmentedArrayTest, ConstructAndPopulate) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + Array<TestData> data(A); + ASSERT_NE(data.Append(TestData{0, 0}), nullptr); + ASSERT_NE(data.Append(TestData{1, 1}), nullptr); + ASSERT_EQ(data.size(), 2u); +} + +TEST(SegmentedArrayTest, ConstructPopulateAndLookup) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + Array<TestData> data(A); + ASSERT_NE(data.Append(TestData{0, 1}), nullptr); + ASSERT_EQ(data.size(), 1u); + ASSERT_EQ(data[0].First, 0); + ASSERT_EQ(data[0].Second, 1); +} + +TEST(SegmentedArrayTest, PopulateWithMoreElements) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 24); + Array<TestData> data(A); + static const auto kMaxElements = 100u; + for (auto I = 0u; I < kMaxElements; ++I) { + ASSERT_NE(data.Append(TestData{I, I + 1}), nullptr); + } + ASSERT_EQ(data.size(), kMaxElements); + for (auto I = 0u; I < kMaxElements; ++I) { + ASSERT_EQ(data[I].First, I); + ASSERT_EQ(data[I].Second, I + 1); + } +} + 
+TEST(SegmentedArrayTest, AppendEmplace) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + Array<TestData> data(A); + ASSERT_NE(data.AppendEmplace(1, 1), nullptr); + ASSERT_EQ(data[0].First, 1); + ASSERT_EQ(data[0].Second, 1); +} + +TEST(SegmentedArrayTest, AppendAndTrim) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + Array<TestData> data(A); + ASSERT_NE(data.AppendEmplace(1, 1), nullptr); + ASSERT_EQ(data.size(), 1u); + data.trim(1); + ASSERT_EQ(data.size(), 0u); + ASSERT_TRUE(data.empty()); +} + +TEST(SegmentedArrayTest, IteratorAdvance) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + Array<TestData> data(A); + ASSERT_TRUE(data.empty()); + ASSERT_EQ(data.begin(), data.end()); + auto I0 = data.begin(); + ASSERT_EQ(I0++, data.begin()); + ASSERT_NE(I0, data.begin()); + for (const auto &D : data) { + (void)D; + FAIL(); + } + ASSERT_NE(data.AppendEmplace(1, 1), nullptr); + ASSERT_EQ(data.size(), 1u); + ASSERT_NE(data.begin(), data.end()); + auto &D0 = *data.begin(); + ASSERT_EQ(D0.First, 1); + ASSERT_EQ(D0.Second, 1); +} + +TEST(SegmentedArrayTest, IteratorRetreat) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + Array<TestData> data(A); + ASSERT_TRUE(data.empty()); + ASSERT_EQ(data.begin(), data.end()); + ASSERT_NE(data.AppendEmplace(1, 1), nullptr); + ASSERT_EQ(data.size(), 1u); + ASSERT_NE(data.begin(), data.end()); + auto &D0 = *data.begin(); + ASSERT_EQ(D0.First, 1); + ASSERT_EQ(D0.Second, 1); + + auto I0 = data.end(); + ASSERT_EQ(I0--, data.end()); + ASSERT_NE(I0, data.end()); + ASSERT_EQ(I0, data.begin()); + ASSERT_EQ(I0->First, 1); + ASSERT_EQ(I0->Second, 1); +} + +TEST(SegmentedArrayTest, IteratorTrimBehaviour) { + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 20); + Array<TestData> Data(A); + ASSERT_TRUE(Data.empty()); + auto 
I0Begin = Data.begin(), I0End = Data.end(); + // Add enough elements in Data to have more than one chunk. + constexpr auto Segment = Array<TestData>::SegmentSize; + constexpr auto SegmentX2 = Segment * 2; + for (auto i = SegmentX2; i > 0u; --i) { + Data.AppendEmplace(static_cast<s64>(i), static_cast<s64>(i)); + } + ASSERT_EQ(Data.size(), SegmentX2); + { + auto &Back = Data.back(); + ASSERT_EQ(Back.First, 1); + ASSERT_EQ(Back.Second, 1); + } + + // Trim one chunk's elements worth. + Data.trim(Segment); + ASSERT_EQ(Data.size(), Segment); + + // Check that we are still able to access 'back' properly. + { + auto &Back = Data.back(); + ASSERT_EQ(Back.First, static_cast<s64>(Segment + 1)); + ASSERT_EQ(Back.Second, static_cast<s64>(Segment + 1)); + } + + // Then trim until it's empty. + Data.trim(Segment); + ASSERT_TRUE(Data.empty()); + + // Here our iterators should be the same. + auto I1Begin = Data.begin(), I1End = Data.end(); + EXPECT_EQ(I0Begin, I1Begin); + EXPECT_EQ(I0End, I1End); + + // Then we ensure that adding elements back works just fine. + for (auto i = SegmentX2; i > 0u; --i) { + Data.AppendEmplace(static_cast<s64>(i), static_cast<s64>(i)); + } + EXPECT_EQ(Data.size(), SegmentX2); +} + +struct ShadowStackEntry { + uint64_t EntryTSC = 0; + uint64_t *NodePtr = nullptr; + ShadowStackEntry(uint64_t T, uint64_t *N) : EntryTSC(T), NodePtr(N) {} +}; + +TEST(SegmentedArrayTest, SimulateStackBehaviour) { + using AllocatorType = typename Array<ShadowStackEntry>::AllocatorType; + AllocatorType A(1 << 10); + Array<ShadowStackEntry> Data(A); + static uint64_t Dummy = 0; + constexpr uint64_t Max = 9; + + for (uint64_t i = 0; i < Max; ++i) { + auto P = Data.Append({i, &Dummy}); + ASSERT_NE(P, nullptr); + ASSERT_EQ(P->NodePtr, &Dummy); + auto &Back = Data.back(); + ASSERT_EQ(Back.NodePtr, &Dummy); + ASSERT_EQ(Back.EntryTSC, i); + } + + // Simulate a stack by checking the data from the end as we're trimming. 
+ auto Counter = Max; + ASSERT_EQ(Data.size(), size_t(Max)); + while (!Data.empty()) { + const auto &Top = Data.back(); + uint64_t *TopNode = Top.NodePtr; + EXPECT_EQ(TopNode, &Dummy) << "Counter = " << Counter; + Data.trim(1); + --Counter; + ASSERT_EQ(Data.size(), size_t(Counter)); + } +} + +} // namespace +} // namespace __xray diff --git a/lib/xray/xray_AArch64.cc b/lib/xray/xray_AArch64.cc index f26e77dd7fc1..096de009e83c 100644 --- a/lib/xray/xray_AArch64.cc +++ b/lib/xray/xray_AArch64.cc @@ -112,6 +112,12 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId, return false; } +bool patchTypedEvent(const bool Enable, const uint32_t FuncId, + const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { + // FIXME: Implement in aarch64? + return false; +} + // FIXME: Maybe implement this better? bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; } diff --git a/lib/xray/xray_allocator.h b/lib/xray/xray_allocator.h new file mode 100644 index 000000000000..8244815284a8 --- /dev/null +++ b/lib/xray/xray_allocator.h @@ -0,0 +1,129 @@ +//===-- xray_allocator.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// Defines the allocator interface for an arena allocator, used primarily for +// the profiling runtime. 
+// +//===----------------------------------------------------------------------===// +#ifndef XRAY_ALLOCATOR_H +#define XRAY_ALLOCATOR_H + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_posix.h" +#include "xray_utils.h" +#include <sys/mman.h> +#include <cstddef> +#include <cstdint> + +#ifndef MAP_NORESERVE +// no-op on NetBSD (at least), unsupported flag on FreeBSD basically because unneeded +#define MAP_NORESERVE 0 +#endif + +namespace __xray { + +/// The Allocator type hands out fixed-sized chunks of memory that are +/// cache-line aligned and sized. This is useful for placement of +/// performance-sensitive data in memory that's frequently accessed. The +/// allocator also self-limits the peak memory usage to a dynamically defined +/// maximum. +/// +/// N is the lower-bound size of the block of memory to return from the +/// allocation function. N is used to compute the size of a block, which is +/// cache-line-size multiples worth of memory. We compute the size of a block by +/// determining how many cache lines worth of memory is required to subsume N. +/// +/// The Allocator instance will manage its own memory acquired through mmap. +/// This severely constrains the platforms on which this can be used to POSIX +/// systems where mmap semantics are well-defined. +/// +/// FIXME: Isolate the lower-level memory management to a different abstraction +/// that can be platform-specific. +template <size_t N> struct Allocator { + // The Allocator returns memory as Block instances. + struct Block { + /// Compute the minimum cache-line size multiple that is >= N. 
+ static constexpr auto Size = nearest_boundary(N, kCacheLineSize); + void *Data; + }; + +private: + const size_t MaxMemory{0}; + void *BackingStore = nullptr; + void *AlignedNextBlock = nullptr; + size_t AllocatedBlocks = 0; + SpinMutex Mutex{}; + + void *Alloc() { + SpinMutexLock Lock(&Mutex); + if (UNLIKELY(BackingStore == nullptr)) { + BackingStore = reinterpret_cast<void *>( + internal_mmap(NULL, MaxMemory, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, 0, 0)); + if (BackingStore == MAP_FAILED) { + BackingStore = nullptr; + if (Verbosity()) + Report("XRay Profiling: Failed to allocate memory for allocator.\n"); + return nullptr; + } + + AlignedNextBlock = BackingStore; + + // Ensure that NextBlock is aligned appropriately. + auto BackingStoreNum = reinterpret_cast<uintptr_t>(BackingStore); + auto AlignedNextBlockNum = nearest_boundary( + reinterpret_cast<uintptr_t>(AlignedNextBlock), kCacheLineSize); + if (diff(AlignedNextBlockNum, BackingStoreNum) > ptrdiff_t(MaxMemory)) { + munmap(BackingStore, MaxMemory); + AlignedNextBlock = BackingStore = nullptr; + if (Verbosity()) + Report("XRay Profiling: Cannot obtain enough memory from " + "preallocated region.\n"); + return nullptr; + } + + AlignedNextBlock = reinterpret_cast<void *>(AlignedNextBlockNum); + + // Assert that AlignedNextBlock is cache-line aligned. + DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize, + 0); + } + + if ((AllocatedBlocks * Block::Size) >= MaxMemory) + return nullptr; + + // Align the pointer we'd like to return to an appropriate alignment, then + // advance the pointer from where to start allocations. 
+ void *Result = AlignedNextBlock; + AlignedNextBlock = reinterpret_cast<void *>( + reinterpret_cast<char *>(AlignedNextBlock) + N); + ++AllocatedBlocks; + return Result; + } + +public: + explicit Allocator(size_t M) + : MaxMemory(nearest_boundary(M, kCacheLineSize)) {} + + Block Allocate() { return {Alloc()}; } + + ~Allocator() NOEXCEPT { + if (BackingStore != nullptr) { + internal_munmap(BackingStore, MaxMemory); + } + } +}; + +} // namespace __xray + +#endif // XRAY_ALLOCATOR_H diff --git a/lib/xray/xray_arm.cc b/lib/xray/xray_arm.cc index da4efcdd2b17..5b828287e3f6 100644 --- a/lib/xray/xray_arm.cc +++ b/lib/xray/xray_arm.cc @@ -149,6 +149,12 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId, return false; } +bool patchTypedEvent(const bool Enable, const uint32_t FuncId, + const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { + // FIXME: Implement in arm? + return false; +} + // FIXME: Maybe implement this better? bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; } diff --git a/lib/xray/xray_basic_flags.cc b/lib/xray/xray_basic_flags.cc new file mode 100644 index 000000000000..14d805c71a88 --- /dev/null +++ b/lib/xray/xray_basic_flags.cc @@ -0,0 +1,50 @@ +//===-- xray_basic_flags.cc -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// XRay Basic flag parsing logic. 
+//===----------------------------------------------------------------------===// + +#include "xray_basic_flags.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "xray_defs.h" + +using namespace __sanitizer; + +namespace __xray { + +/// Use via basicFlags(). +BasicFlags xray_basic_flags_dont_use_directly; + +void BasicFlags::setDefaults() XRAY_NEVER_INSTRUMENT { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "xray_basic_flags.inc" +#undef XRAY_FLAG +} + +void registerXRayBasicFlags(FlagParser *P, + BasicFlags *F) XRAY_NEVER_INSTRUMENT { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(P, #Name, Description, &F->Name); +#include "xray_basic_flags.inc" +#undef XRAY_FLAG +} + +const char *useCompilerDefinedBasicFlags() XRAY_NEVER_INSTRUMENT { +#ifdef XRAY_BASIC_OPTIONS + return SANITIZER_STRINGIFY(XRAY_BASIC_OPTIONS); +#else + return ""; +#endif +} + +} // namespace __xray diff --git a/lib/xray/xray_basic_flags.h b/lib/xray/xray_basic_flags.h new file mode 100644 index 000000000000..041578f0663c --- /dev/null +++ b/lib/xray/xray_basic_flags.h @@ -0,0 +1,38 @@ +//===-- xray_basic_flags.h -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instruementation system. +// +// XRay Basic Mode runtime flags. 
+//===----------------------------------------------------------------------===// + +#ifndef XRAY_BASIC_FLAGS_H +#define XRAY_BASIC_FLAGS_H + +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __xray { + +struct BasicFlags { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "xray_basic_flags.inc" +#undef XRAY_FLAG + + void setDefaults(); +}; + +extern BasicFlags xray_basic_flags_dont_use_directly; +extern void registerXRayBasicFlags(FlagParser *P, BasicFlags *F); +const char *useCompilerDefinedBasicFlags(); +inline BasicFlags *basicFlags() { return &xray_basic_flags_dont_use_directly; } + +} // namespace __xray + +#endif // XRAY_BASIC_FLAGS_H diff --git a/lib/xray/xray_basic_flags.inc b/lib/xray/xray_basic_flags.inc new file mode 100644 index 000000000000..327735b51055 --- /dev/null +++ b/lib/xray/xray_basic_flags.inc @@ -0,0 +1,24 @@ +//===-- xray_basic_flags.inc ------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// XRay runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef XRAY_FLAG +#error "Define XRAY_FLAG prior to including this file!" 
+#endif + +XRAY_FLAG(int, func_duration_threshold_us, 5, + "Basic logging will try to skip functions that execute for fewer " + "microseconds than this threshold.") +XRAY_FLAG(int, max_stack_depth, 64, + "Basic logging will keep track of at most this deep a call stack, " + "any more and the recordings will be dropped.") +XRAY_FLAG(int, thread_buffer_size, 1024, + "The number of entries to keep on a per-thread buffer.") diff --git a/lib/xray/xray_inmemory_log.cc b/lib/xray/xray_basic_logging.cc index a27ffbcbd12e..585ca641cd0c 100644 --- a/lib/xray/xray_inmemory_log.cc +++ b/lib/xray/xray_basic_logging.cc @@ -1,4 +1,4 @@ -//===-- xray_inmemory_log.cc ------------------------------------*- C++ -*-===// +//===-- xray_basic_logging.cc -----------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -15,8 +15,6 @@ // //===----------------------------------------------------------------------===// -#include <cassert> -#include <cstring> #include <errno.h> #include <fcntl.h> #include <pthread.h> @@ -29,16 +27,18 @@ #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_libc.h" #include "xray/xray_records.h" +#include "xray_recursion_guard.h" +#include "xray_basic_flags.h" +#include "xray_basic_logging.h" #include "xray_defs.h" #include "xray_flags.h" -#include "xray_inmemory_log.h" #include "xray_interface_internal.h" #include "xray_tsc.h" #include "xray_utils.h" namespace __xray { -__sanitizer::SpinMutex LogMutex; +SpinMutex LogMutex; // We use elements of this type to record the entry TSC of every function ID we // see as we're tracing a particular thread's execution. 
@@ -60,43 +60,41 @@ struct alignas(64) ThreadLocalData { size_t StackSize = 0; size_t StackEntries = 0; int Fd = -1; - pid_t TID = 0; }; static pthread_key_t PThreadKey; -static __sanitizer::atomic_uint8_t BasicInitialized{0}; +static atomic_uint8_t BasicInitialized{0}; BasicLoggingOptions GlobalOptions; -thread_local volatile bool RecursionGuard = false; +thread_local atomic_uint8_t Guard{0}; -static uint64_t thresholdTicks() XRAY_NEVER_INSTRUMENT { - static uint64_t TicksPerSec = probeRequiredCPUFeatures() - ? getTSCFrequency() - : __xray::NanosecondsPerSecond; - static const uint64_t ThresholdTicks = - TicksPerSec * GlobalOptions.DurationFilterMicros / 1000000; - return ThresholdTicks; -} +static atomic_uint8_t UseRealTSC{0}; +static atomic_uint64_t ThresholdTicks{0}; +static atomic_uint64_t TicksPerSec{0}; +static atomic_uint64_t CycleFrequency{NanosecondsPerSecond}; static int openLogFile() XRAY_NEVER_INSTRUMENT { int F = getLogFD(); if (F == -1) return -1; - // Test for required CPU features and cache the cycle frequency - static bool TSCSupported = probeRequiredCPUFeatures(); - static uint64_t CycleFrequency = - TSCSupported ? getTSCFrequency() : __xray::NanosecondsPerSecond; + static pthread_once_t DetectOnce = PTHREAD_ONCE_INIT; + pthread_once(&DetectOnce, +[] { + if (atomic_load(&UseRealTSC, memory_order_acquire)) + atomic_store(&CycleFrequency, getTSCFrequency(), memory_order_release); + }); // Since we're here, we get to write the header. We set it up so that the // header will only be written once, at the start, and let the threads // logging do writes which just append. XRayFileHeader Header; - Header.Version = 2; // Version 2 includes tail exit records. + // Version 2 includes tail exit records. + // Version 3 includes pid inside records. 
+ Header.Version = 3; Header.Type = FileTypes::NAIVE_LOG; - Header.CycleFrequency = CycleFrequency; + Header.CycleFrequency = atomic_load(&CycleFrequency, memory_order_acquire); // FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc' // before setting the values in the header. @@ -107,20 +105,21 @@ static int openLogFile() XRAY_NEVER_INSTRUMENT { return F; } -int getGlobalFd() XRAY_NEVER_INSTRUMENT { - static int Fd = openLogFile(); +static int getGlobalFd() XRAY_NEVER_INSTRUMENT { + static pthread_once_t OnceInit = PTHREAD_ONCE_INIT; + static int Fd = 0; + pthread_once(&OnceInit, +[] { Fd = openLogFile(); }); return Fd; } -ThreadLocalData &getThreadLocalData() XRAY_NEVER_INSTRUMENT { +static ThreadLocalData &getThreadLocalData() XRAY_NEVER_INSTRUMENT { thread_local ThreadLocalData TLD; thread_local bool UNUSED TOnce = [] { if (GlobalOptions.ThreadBufferSize == 0) { - if (__sanitizer::Verbosity()) + if (Verbosity()) Report("Not initializing TLD since ThreadBufferSize == 0.\n"); return false; } - TLD.TID = __sanitizer::GetTid(); pthread_setspecific(PThreadKey, &TLD); TLD.Fd = getGlobalFd(); TLD.InMemoryBuffer = reinterpret_cast<XRayRecord *>( @@ -129,7 +128,7 @@ ThreadLocalData &getThreadLocalData() XRAY_NEVER_INSTRUMENT { TLD.BufferSize = GlobalOptions.ThreadBufferSize; TLD.BufferOffset = 0; if (GlobalOptions.MaxStackDepth == 0) { - if (__sanitizer::Verbosity()) + if (Verbosity()) Report("Not initializing the ShadowStack since MaxStackDepth == 0.\n"); TLD.StackSize = 0; TLD.StackEntries = 0; @@ -141,13 +140,6 @@ ThreadLocalData &getThreadLocalData() XRAY_NEVER_INSTRUMENT { alignof(StackEntry))); TLD.StackSize = GlobalOptions.MaxStackDepth; TLD.StackEntries = 0; - if (__sanitizer::Verbosity() >= 2) { - static auto UNUSED Once = [] { - auto ticks = thresholdTicks(); - Report("Ticks threshold: %d\n", ticks); - return false; - }(); - } return false; }(); return TLD; @@ -157,7 +149,6 @@ template <class RDTSC> void InMemoryRawLog(int32_t FuncId, 
XRayEntryType Type, RDTSC ReadTSC) XRAY_NEVER_INSTRUMENT { auto &TLD = getThreadLocalData(); - auto &InMemoryBuffer = TLD.InMemoryBuffer; int Fd = getGlobalFd(); if (Fd == -1) return; @@ -165,10 +156,9 @@ void InMemoryRawLog(int32_t FuncId, XRayEntryType Type, // Use a simple recursion guard, to handle cases where we're already logging // and for one reason or another, this function gets called again in the same // thread. - if (RecursionGuard) + RecursionGuard G(Guard); + if (!G) return; - RecursionGuard = true; - auto ExitGuard = __sanitizer::at_scope_exit([] { RecursionGuard = false; }); uint8_t CPU = 0; uint64_t TSC = ReadTSC(CPU); @@ -189,7 +179,7 @@ void InMemoryRawLog(int32_t FuncId, XRayEntryType Type, E.TSC = TSC; auto StackEntryPtr = static_cast<char *>(TLD.ShadowStack) + (sizeof(StackEntry) * (TLD.StackEntries - 1)); - __sanitizer::internal_memcpy(StackEntryPtr, &E, sizeof(StackEntry)); + internal_memcpy(StackEntryPtr, &E, sizeof(StackEntry)); break; } case XRayEntryType::EXIT: @@ -213,12 +203,12 @@ void InMemoryRawLog(int32_t FuncId, XRayEntryType Type, StackEntry StackTop; auto StackEntryPtr = static_cast<char *>(TLD.ShadowStack) + (sizeof(StackEntry) * TLD.StackEntries); - __sanitizer::internal_memcpy(&StackTop, StackEntryPtr, sizeof(StackEntry)); + internal_memcpy(&StackTop, StackEntryPtr, sizeof(StackEntry)); if (StackTop.FuncId == FuncId && StackTop.CPU == CPU && StackTop.TSC < TSC) { auto Delta = TSC - StackTop.TSC; - if (Delta < thresholdTicks()) { - assert(TLD.BufferOffset > 0); + if (Delta < atomic_load(&ThresholdTicks, memory_order_relaxed)) { + DCHECK(TLD.BufferOffset > 0); TLD.BufferOffset -= StackTop.Type == XRayEntryType::ENTRY ? 1 : 2; return; } @@ -227,27 +217,26 @@ void InMemoryRawLog(int32_t FuncId, XRayEntryType Type, } default: // Should be unreachable. 
- assert(false && "Unsupported XRayEntryType encountered."); + DCHECK(false && "Unsupported XRayEntryType encountered."); break; } // First determine whether the delta between the function's enter record and // the exit record is higher than the threshold. - __xray::XRayRecord R; + XRayRecord R; R.RecordType = RecordTypes::NORMAL; R.CPU = CPU; R.TSC = TSC; - R.TId = TLD.TID; + R.TId = GetTid(); + R.PId = internal_getpid(); R.Type = Type; R.FuncId = FuncId; - auto EntryPtr = static_cast<char *>(InMemoryBuffer) + - (sizeof(__xray::XRayRecord) * TLD.BufferOffset); - __sanitizer::internal_memcpy(EntryPtr, &R, sizeof(R)); + auto FirstEntry = reinterpret_cast<XRayRecord *>(TLD.InMemoryBuffer); + internal_memcpy(FirstEntry + TLD.BufferOffset, &R, sizeof(R)); if (++TLD.BufferOffset == TLD.BufferSize) { - __sanitizer::SpinMutexLock L(&LogMutex); - auto RecordBuffer = reinterpret_cast<__xray::XRayRecord *>(InMemoryBuffer); - retryingWriteAll(Fd, reinterpret_cast<char *>(RecordBuffer), - reinterpret_cast<char *>(RecordBuffer + TLD.BufferOffset)); + SpinMutexLock L(&LogMutex); + retryingWriteAll(Fd, reinterpret_cast<char *>(FirstEntry), + reinterpret_cast<char *>(FirstEntry + TLD.BufferOffset)); TLD.BufferOffset = 0; TLD.StackEntries = 0; } @@ -257,8 +246,8 @@ template <class RDTSC> void InMemoryRawLogWithArg(int32_t FuncId, XRayEntryType Type, uint64_t Arg1, RDTSC ReadTSC) XRAY_NEVER_INSTRUMENT { auto &TLD = getThreadLocalData(); - auto &InMemoryBuffer = TLD.InMemoryBuffer; - auto &Offset = TLD.BufferOffset; + auto FirstEntry = + reinterpret_cast<XRayArgPayload *>(TLD.InMemoryBuffer); const auto &BuffLen = TLD.BufferSize; int Fd = getGlobalFd(); if (Fd == -1) @@ -267,45 +256,41 @@ void InMemoryRawLogWithArg(int32_t FuncId, XRayEntryType Type, uint64_t Arg1, // First we check whether there's enough space to write the data consecutively // in the thread-local buffer. If not, we first flush the buffer before // attempting to write the two records that must be consecutive. 
- if (Offset + 2 > BuffLen) { - __sanitizer::SpinMutexLock L(&LogMutex); - auto RecordBuffer = reinterpret_cast<__xray::XRayRecord *>(InMemoryBuffer); - retryingWriteAll(Fd, reinterpret_cast<char *>(RecordBuffer), - reinterpret_cast<char *>(RecordBuffer + Offset)); - Offset = 0; + if (TLD.BufferOffset + 2 > BuffLen) { + SpinMutexLock L(&LogMutex); + retryingWriteAll(Fd, reinterpret_cast<char *>(FirstEntry), + reinterpret_cast<char *>(FirstEntry + TLD.BufferOffset)); + TLD.BufferOffset = 0; TLD.StackEntries = 0; } // Then we write the "we have an argument" record. InMemoryRawLog(FuncId, Type, ReadTSC); - if (RecursionGuard) + RecursionGuard G(Guard); + if (!G) return; - RecursionGuard = true; - auto ExitGuard = __sanitizer::at_scope_exit([] { RecursionGuard = false; }); - // And from here on write the arg payload. - __xray::XRayArgPayload R; + // And, from here on write the arg payload. + XRayArgPayload R; R.RecordType = RecordTypes::ARG_PAYLOAD; R.FuncId = FuncId; - R.TId = TLD.TID; + R.TId = GetTid(); + R.PId = internal_getpid(); R.Arg = Arg1; - auto EntryPtr = - &reinterpret_cast<__xray::XRayArgPayload *>(&InMemoryBuffer)[Offset]; - std::memcpy(EntryPtr, &R, sizeof(R)); - if (++Offset == BuffLen) { - __sanitizer::SpinMutexLock L(&LogMutex); - auto RecordBuffer = reinterpret_cast<__xray::XRayRecord *>(InMemoryBuffer); - retryingWriteAll(Fd, reinterpret_cast<char *>(RecordBuffer), - reinterpret_cast<char *>(RecordBuffer + Offset)); - Offset = 0; + internal_memcpy(FirstEntry + TLD.BufferOffset, &R, sizeof(R)); + if (++TLD.BufferOffset == BuffLen) { + SpinMutexLock L(&LogMutex); + retryingWriteAll(Fd, reinterpret_cast<char *>(FirstEntry), + reinterpret_cast<char *>(FirstEntry + TLD.BufferOffset)); + TLD.BufferOffset = 0; TLD.StackEntries = 0; } } void basicLoggingHandleArg0RealTSC(int32_t FuncId, XRayEntryType Type) XRAY_NEVER_INSTRUMENT { - InMemoryRawLog(FuncId, Type, __xray::readTSC); + InMemoryRawLog(FuncId, Type, readTSC); } void 
basicLoggingHandleArg0EmulateTSC(int32_t FuncId, XRayEntryType Type) @@ -318,13 +303,13 @@ void basicLoggingHandleArg0EmulateTSC(int32_t FuncId, XRayEntryType Type) TS = {0, 0}; } CPU = 0; - return TS.tv_sec * __xray::NanosecondsPerSecond + TS.tv_nsec; + return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec; }); } void basicLoggingHandleArg1RealTSC(int32_t FuncId, XRayEntryType Type, uint64_t Arg1) XRAY_NEVER_INSTRUMENT { - InMemoryRawLogWithArg(FuncId, Type, Arg1, __xray::readTSC); + InMemoryRawLogWithArg(FuncId, Type, Arg1, readTSC); } void basicLoggingHandleArg1EmulateTSC(int32_t FuncId, XRayEntryType Type, @@ -338,34 +323,34 @@ void basicLoggingHandleArg1EmulateTSC(int32_t FuncId, XRayEntryType Type, TS = {0, 0}; } CPU = 0; - return TS.tv_sec * __xray::NanosecondsPerSecond + TS.tv_nsec; + return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec; }); } static void TLDDestructor(void *P) XRAY_NEVER_INSTRUMENT { ThreadLocalData &TLD = *reinterpret_cast<ThreadLocalData *>(P); - auto ExitGuard = __sanitizer::at_scope_exit([&TLD] { + auto ExitGuard = at_scope_exit([&TLD] { // Clean up dynamic resources. 
if (TLD.InMemoryBuffer) InternalFree(TLD.InMemoryBuffer); if (TLD.ShadowStack) InternalFree(TLD.ShadowStack); - if (__sanitizer::Verbosity()) - Report("Cleaned up log for TID: %d\n", TLD.TID); + if (Verbosity()) + Report("Cleaned up log for TID: %d\n", GetTid()); }); if (TLD.Fd == -1 || TLD.BufferOffset == 0) { - if (__sanitizer::Verbosity()) - Report("Skipping buffer for TID: %d; Fd = %d; Offset = %llu\n", TLD.TID, + if (Verbosity()) + Report("Skipping buffer for TID: %d; Fd = %d; Offset = %llu\n", GetTid(), TLD.Fd, TLD.BufferOffset); return; } { - __sanitizer::SpinMutexLock L(&LogMutex); + SpinMutexLock L(&LogMutex); retryingWriteAll(TLD.Fd, reinterpret_cast<char *>(TLD.InMemoryBuffer), reinterpret_cast<char *>(TLD.InMemoryBuffer) + - (sizeof(__xray::XRayRecord) * TLD.BufferOffset)); + (sizeof(XRayRecord) * TLD.BufferOffset)); } // Because this thread's exit could be the last one trying to write to @@ -378,45 +363,89 @@ static void TLDDestructor(void *P) XRAY_NEVER_INSTRUMENT { XRayLogInitStatus basicLoggingInit(size_t BufferSize, size_t BufferMax, void *Options, size_t OptionsSize) XRAY_NEVER_INSTRUMENT { - static bool UNUSED Once = [] { - pthread_key_create(&PThreadKey, TLDDestructor); - return false; - }(); - uint8_t Expected = 0; - if (!__sanitizer::atomic_compare_exchange_strong( - &BasicInitialized, &Expected, 1, __sanitizer::memory_order_acq_rel)) { - if (__sanitizer::Verbosity()) + if (!atomic_compare_exchange_strong(&BasicInitialized, &Expected, 1, + memory_order_acq_rel)) { + if (Verbosity()) Report("Basic logging already initialized.\n"); return XRayLogInitStatus::XRAY_LOG_INITIALIZED; } - if (OptionsSize != sizeof(BasicLoggingOptions)) { + static pthread_once_t OnceInit = PTHREAD_ONCE_INIT; + pthread_once(&OnceInit, +[] { + pthread_key_create(&PThreadKey, TLDDestructor); + atomic_store(&UseRealTSC, probeRequiredCPUFeatures(), memory_order_release); + // Initialize the global TicksPerSec value. 
+ atomic_store(&TicksPerSec, + probeRequiredCPUFeatures() ? getTSCFrequency() + : NanosecondsPerSecond, + memory_order_release); + if (!atomic_load(&UseRealTSC, memory_order_relaxed) && Verbosity()) + Report("WARNING: Required CPU features missing for XRay instrumentation, " + "using emulation instead.\n"); + }); + + if (BufferSize == 0 && BufferMax == 0 && Options != nullptr) { + FlagParser P; + BasicFlags F; + F.setDefaults(); + registerXRayBasicFlags(&P, &F); + P.ParseString(useCompilerDefinedBasicFlags()); + auto *EnvOpts = GetEnv("XRAY_BASIC_OPTIONS"); + if (EnvOpts == nullptr) + EnvOpts = ""; + + P.ParseString(EnvOpts); + + // If XRAY_BASIC_OPTIONS was not defined, then we use the deprecated options + // set through XRAY_OPTIONS instead. + if (internal_strlen(EnvOpts) == 0) { + F.func_duration_threshold_us = + flags()->xray_naive_log_func_duration_threshold_us; + F.max_stack_depth = flags()->xray_naive_log_max_stack_depth; + F.thread_buffer_size = flags()->xray_naive_log_thread_buffer_size; + } + + P.ParseString(static_cast<const char *>(Options)); + GlobalOptions.ThreadBufferSize = F.thread_buffer_size; + GlobalOptions.DurationFilterMicros = F.func_duration_threshold_us; + GlobalOptions.MaxStackDepth = F.max_stack_depth; + *basicFlags() = F; + } else if (OptionsSize != sizeof(BasicLoggingOptions)) { Report("Invalid options size, potential ABI mismatch; expected %d got %d", sizeof(BasicLoggingOptions), OptionsSize); return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + } else { + if (Verbosity()) + Report("XRay Basic: struct-based init is deprecated, please use " + "string-based configuration instead.\n"); + GlobalOptions = *reinterpret_cast<BasicLoggingOptions *>(Options); } - static auto UseRealTSC = probeRequiredCPUFeatures(); - if (!UseRealTSC && __sanitizer::Verbosity()) - Report("WARNING: Required CPU features missing for XRay instrumentation, " - "using emulation instead.\n"); - - GlobalOptions = *reinterpret_cast<BasicLoggingOptions *>(Options); - 
__xray_set_handler_arg1(UseRealTSC ? basicLoggingHandleArg1RealTSC - : basicLoggingHandleArg1EmulateTSC); - __xray_set_handler(UseRealTSC ? basicLoggingHandleArg0RealTSC - : basicLoggingHandleArg0EmulateTSC); + atomic_store(&ThresholdTicks, + atomic_load(&TicksPerSec, memory_order_acquire) * + GlobalOptions.DurationFilterMicros / 1000000, + memory_order_release); + __xray_set_handler_arg1(atomic_load(&UseRealTSC, memory_order_acquire) + ? basicLoggingHandleArg1RealTSC + : basicLoggingHandleArg1EmulateTSC); + __xray_set_handler(atomic_load(&UseRealTSC, memory_order_acquire) + ? basicLoggingHandleArg0RealTSC + : basicLoggingHandleArg0EmulateTSC); + + // TODO: Implement custom event and typed event handling support in Basic + // Mode. __xray_remove_customevent_handler(); + __xray_remove_typedevent_handler(); return XRayLogInitStatus::XRAY_LOG_INITIALIZED; } XRayLogInitStatus basicLoggingFinalize() XRAY_NEVER_INSTRUMENT { uint8_t Expected = 0; - if (!__sanitizer::atomic_compare_exchange_strong( - &BasicInitialized, &Expected, 0, __sanitizer::memory_order_acq_rel) && - __sanitizer::Verbosity()) + if (!atomic_compare_exchange_strong(&BasicInitialized, &Expected, 0, + memory_order_acq_rel) && + Verbosity()) Report("Basic logging already finalized.\n"); // Nothing really to do aside from marking state of the global to be @@ -444,24 +473,41 @@ bool basicLogDynamicInitializer() XRAY_NEVER_INSTRUMENT { }; auto RegistrationResult = __xray_log_register_mode("xray-basic", Impl); if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK && - __sanitizer::Verbosity()) + Verbosity()) Report("Cannot register XRay Basic Mode to 'xray-basic'; error = %d\n", RegistrationResult); if (flags()->xray_naive_log || - !__sanitizer::internal_strcmp(flags()->xray_mode, "xray-basic")) { - __xray_set_log_impl(Impl); - BasicLoggingOptions Options; - Options.DurationFilterMicros = - flags()->xray_naive_log_func_duration_threshold_us; - Options.MaxStackDepth = 
flags()->xray_naive_log_max_stack_depth; - Options.ThreadBufferSize = flags()->xray_naive_log_thread_buffer_size; - __xray_log_init(flags()->xray_naive_log_thread_buffer_size, 0, &Options, - sizeof(BasicLoggingOptions)); - static auto UNUSED Once = [] { - static auto UNUSED &TLD = getThreadLocalData(); - __sanitizer::Atexit(+[] { TLDDestructor(&TLD); }); + !internal_strcmp(flags()->xray_mode, "xray-basic")) { + auto SelectResult = __xray_log_select_mode("xray-basic"); + if (SelectResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) { + if (Verbosity()) + Report("Failed selecting XRay Basic Mode; error = %d\n", SelectResult); return false; - }(); + } + + // We initialize the implementation using the data we get from the + // XRAY_BASIC_OPTIONS environment variable, at this point of the + // implementation. + auto *Env = GetEnv("XRAY_BASIC_OPTIONS"); + auto InitResult = + __xray_log_init_mode("xray-basic", Env == nullptr ? "" : Env); + if (InitResult != XRayLogInitStatus::XRAY_LOG_INITIALIZED) { + if (Verbosity()) + Report("Failed initializing XRay Basic Mode; error = %d\n", InitResult); + return false; + } + + // At this point we know that we've successfully initialized Basic mode + // tracing, and the only chance we're going to get for the current thread to + // clean-up may be at thread/program exit. To ensure that we're going to get + // the cleanup even without calling the finalization routines, we're + // registering a program exit function that will do the cleanup. 
+ static pthread_once_t DynamicOnce = PTHREAD_ONCE_INIT; + pthread_once(&DynamicOnce, +[] { + static void *FakeTLD = nullptr; + FakeTLD = &getThreadLocalData(); + Atexit(+[] { TLDDestructor(FakeTLD); }); + }); } return true; } diff --git a/lib/xray/xray_inmemory_log.h b/lib/xray/xray_basic_logging.h index e4fcb8ca5ffd..1639b96d91a1 100644 --- a/lib/xray/xray_inmemory_log.h +++ b/lib/xray/xray_basic_logging.h @@ -1,5 +1,4 @@ -//===-- xray_inmemory_log.h -//------------------------------------------------===// +//===-- xray_basic_logging.h ----------------------------------------------===// // // The LLVM Compiler Infrastructure // diff --git a/lib/xray/xray_buffer_queue.cc b/lib/xray/xray_buffer_queue.cc index a0018f6b0cba..8dfcc23540b1 100644 --- a/lib/xray/xray_buffer_queue.cc +++ b/lib/xray/xray_buffer_queue.cc @@ -16,14 +16,37 @@ #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include <memory> using namespace __xray; using namespace __sanitizer; +template <class T> static T *initArray(size_t N) { + auto A = reinterpret_cast<T *>( + InternalAlloc(N * sizeof(T), nullptr, kCacheLineSize)); + if (A != nullptr) + while (N > 0) + new (A + (--N)) T(); + return A; +} + BufferQueue::BufferQueue(size_t B, size_t N, bool &Success) - : BufferSize(B), Buffers(new BufferRep[N]()), BufferCount(N), Finalizing{0}, - OwnedBuffers(new void *[N]()), Next(Buffers), First(Buffers), - LiveBuffers(0) { + : BufferSize(B), Buffers(initArray<BufferQueue::BufferRep>(N)), + BufferCount(N), Finalizing{0}, OwnedBuffers(initArray<void *>(N)), + Next(Buffers), First(Buffers), LiveBuffers(0) { + if (Buffers == nullptr) { + Success = false; + return; + } + if (OwnedBuffers == nullptr) { + // Clean up the buffers we've already allocated. 
+ for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B) + B->~BufferRep(); + InternalFree(Buffers); + Success = false; + return; + }; + for (size_t i = 0; i < N; ++i) { auto &T = Buffers[i]; void *Tmp = InternalAlloc(BufferSize, nullptr, 64); @@ -37,7 +60,7 @@ BufferQueue::BufferQueue(size_t B, size_t N, bool &Success) return; } auto &Buf = T.Buff; - Buf.Buffer = Tmp; + Buf.Data = Tmp; Buf.Size = B; Buf.Extents = reinterpret_cast<BufferExtents *>(Extents); OwnedBuffers[i] = Tmp; @@ -46,9 +69,9 @@ BufferQueue::BufferQueue(size_t B, size_t N, bool &Success) } BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) { - if (__sanitizer::atomic_load(&Finalizing, __sanitizer::memory_order_acquire)) + if (atomic_load(&Finalizing, memory_order_acquire)) return ErrorCode::QueueFinalizing; - __sanitizer::SpinMutexLock Guard(&Mutex); + SpinMutexLock Guard(&Mutex); if (LiveBuffers == BufferCount) return ErrorCode::NotEnoughMemory; @@ -68,7 +91,7 @@ BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) { // Blitz through the buffers array to find the buffer. bool Found = false; for (auto I = OwnedBuffers, E = OwnedBuffers + BufferCount; I != E; ++I) { - if (*I == Buf.Buffer) { + if (*I == Buf.Data) { Found = true; break; } @@ -76,7 +99,7 @@ BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) { if (!Found) return ErrorCode::UnrecognizedBuffer; - __sanitizer::SpinMutexLock Guard(&Mutex); + SpinMutexLock Guard(&Mutex); // This points to a semantic bug, we really ought to not be releasing more // buffers than we actually get. @@ -86,7 +109,7 @@ BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) { // Now that the buffer has been released, we mark it as "used". 
First->Buff = Buf; First->Used = true; - Buf.Buffer = nullptr; + Buf.Data = nullptr; Buf.Size = 0; --LiveBuffers; if (++First == (Buffers + BufferCount)) @@ -96,8 +119,7 @@ BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) { } BufferQueue::ErrorCode BufferQueue::finalize() { - if (__sanitizer::atomic_exchange(&Finalizing, 1, - __sanitizer::memory_order_acq_rel)) + if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel)) return ErrorCode::QueueFinalizing; return ErrorCode::Ok; } @@ -106,9 +128,11 @@ BufferQueue::~BufferQueue() { for (auto I = Buffers, E = Buffers + BufferCount; I != E; ++I) { auto &T = *I; auto &Buf = T.Buff; - InternalFree(Buf.Buffer); + InternalFree(Buf.Data); InternalFree(Buf.Extents); } - delete[] Buffers; - delete[] OwnedBuffers; + for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B) + B->~BufferRep(); + InternalFree(Buffers); + InternalFree(OwnedBuffers); } diff --git a/lib/xray/xray_buffer_queue.h b/lib/xray/xray_buffer_queue.h index 1ceb58274616..e76fa7983c90 100644 --- a/lib/xray/xray_buffer_queue.h +++ b/lib/xray/xray_buffer_queue.h @@ -15,9 +15,10 @@ #ifndef XRAY_BUFFER_QUEUE_H #define XRAY_BUFFER_QUEUE_H -#include <cstddef> #include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_mutex.h" +#include <cstddef> namespace __xray { @@ -27,18 +28,17 @@ namespace __xray { /// the "flight data recorder" (FDR) mode to support ongoing XRay function call /// trace collection. class BufferQueue { - public: +public: struct alignas(64) BufferExtents { - __sanitizer::atomic_uint64_t Size; + atomic_uint64_t Size; }; struct Buffer { - void *Buffer = nullptr; + void *Data = nullptr; size_t Size = 0; - BufferExtents* Extents; + BufferExtents *Extents; }; - private: struct BufferRep { // The managed buffer. Buffer Buff; @@ -48,14 +48,72 @@ class BufferQueue { bool Used = false; }; +private: + // This models a ForwardIterator. 
|T| Must be either a `Buffer` or `const + // Buffer`. Note that we only advance to the "used" buffers, when + // incrementing, so that at dereference we're always at a valid point. + template <class T> class Iterator { + public: + BufferRep *Buffers = nullptr; + size_t Offset = 0; + size_t Max = 0; + + Iterator &operator++() { + DCHECK_NE(Offset, Max); + do { + ++Offset; + } while (!Buffers[Offset].Used && Offset != Max); + return *this; + } + + Iterator operator++(int) { + Iterator C = *this; + ++(*this); + return C; + } + + T &operator*() const { return Buffers[Offset].Buff; } + + T *operator->() const { return &(Buffers[Offset].Buff); } + + Iterator(BufferRep *Root, size_t O, size_t M) + : Buffers(Root), Offset(O), Max(M) { + // We want to advance to the first Offset where the 'Used' property is + // true, or to the end of the list/queue. + while (!Buffers[Offset].Used && Offset != Max) { + ++Offset; + } + } + + Iterator() = default; + Iterator(const Iterator &) = default; + Iterator(Iterator &&) = default; + Iterator &operator=(const Iterator &) = default; + Iterator &operator=(Iterator &&) = default; + ~Iterator() = default; + + template <class V> + friend bool operator==(const Iterator &L, const Iterator<V> &R) { + DCHECK_EQ(L.Max, R.Max); + return L.Buffers == R.Buffers && L.Offset == R.Offset; + } + + template <class V> + friend bool operator!=(const Iterator &L, const Iterator<V> &R) { + return !(L == R); + } + }; + // Size of each individual Buffer. size_t BufferSize; BufferRep *Buffers; + + // Amount of pre-allocated buffers. size_t BufferCount; - __sanitizer::SpinMutex Mutex; - __sanitizer::atomic_uint8_t Finalizing; + SpinMutex Mutex; + atomic_uint8_t Finalizing; // Pointers to buffers managed/owned by the BufferQueue. void **OwnedBuffers; @@ -70,7 +128,7 @@ class BufferQueue { // Count of buffers that have been handed out through 'getBuffer'. 
size_t LiveBuffers; - public: +public: enum class ErrorCode : unsigned { Ok, NotEnoughMemory, @@ -81,16 +139,16 @@ class BufferQueue { static const char *getErrorString(ErrorCode E) { switch (E) { - case ErrorCode::Ok: - return "(none)"; - case ErrorCode::NotEnoughMemory: - return "no available buffers in the queue"; - case ErrorCode::QueueFinalizing: - return "queue already finalizing"; - case ErrorCode::UnrecognizedBuffer: - return "buffer being returned not owned by buffer queue"; - case ErrorCode::AlreadyFinalized: - return "queue already finalized"; + case ErrorCode::Ok: + return "(none)"; + case ErrorCode::NotEnoughMemory: + return "no available buffers in the queue"; + case ErrorCode::QueueFinalizing: + return "queue already finalizing"; + case ErrorCode::UnrecognizedBuffer: + return "buffer being returned not owned by buffer queue"; + case ErrorCode::AlreadyFinalized: + return "queue already finalized"; } return "unknown error"; } @@ -122,8 +180,7 @@ class BufferQueue { ErrorCode releaseBuffer(Buffer &Buf); bool finalizing() const { - return __sanitizer::atomic_load(&Finalizing, - __sanitizer::memory_order_acquire); + return atomic_load(&Finalizing, memory_order_acquire); } /// Returns the configured size of the buffers in the buffer queue. @@ -141,19 +198,29 @@ class BufferQueue { /// Applies the provided function F to each Buffer in the queue, only if the /// Buffer is marked 'used' (i.e. has been the result of getBuffer(...) and a /// releaseBuffer(...) operation). - template <class F> - void apply(F Fn) { - __sanitizer::SpinMutexLock G(&Mutex); - for (auto I = Buffers, E = Buffers + BufferCount; I != E; ++I) { - const auto &T = *I; - if (T.Used) Fn(T.Buff); - } + template <class F> void apply(F Fn) { + SpinMutexLock G(&Mutex); + for (auto I = begin(), E = end(); I != E; ++I) + Fn(*I); + } + + using const_iterator = Iterator<const Buffer>; + using iterator = Iterator<Buffer>; + + /// Provides iterator access to the raw Buffer instances. 
+ iterator begin() const { return iterator(Buffers, 0, BufferCount); } + const_iterator cbegin() const { + return const_iterator(Buffers, 0, BufferCount); + } + iterator end() const { return iterator(Buffers, BufferCount, BufferCount); } + const_iterator cend() const { + return const_iterator(Buffers, BufferCount, BufferCount); } // Cleans up allocated buffers. ~BufferQueue(); }; -} // namespace __xray +} // namespace __xray -#endif // XRAY_BUFFER_QUEUE_H +#endif // XRAY_BUFFER_QUEUE_H diff --git a/lib/xray/xray_fdr_flags.cc b/lib/xray/xray_fdr_flags.cc new file mode 100644 index 000000000000..a14851b1b616 --- /dev/null +++ b/lib/xray/xray_fdr_flags.cc @@ -0,0 +1,48 @@ +//===-- xray_fdr_flags.cc ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// XRay FDR flag parsing logic. +//===----------------------------------------------------------------------===// + +#include "xray_fdr_flags.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "xray_defs.h" + +using namespace __sanitizer; + +namespace __xray { + +FDRFlags xray_fdr_flags_dont_use_directly; // use via fdrFlags(). 
+ +void FDRFlags::setDefaults() XRAY_NEVER_INSTRUMENT { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "xray_fdr_flags.inc" +#undef XRAY_FLAG +} + +void registerXRayFDRFlags(FlagParser *P, FDRFlags *F) XRAY_NEVER_INSTRUMENT { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(P, #Name, Description, &F->Name); +#include "xray_fdr_flags.inc" +#undef XRAY_FLAG +} + +const char *useCompilerDefinedFDRFlags() XRAY_NEVER_INSTRUMENT { +#ifdef XRAY_FDR_OPTIONS + return SANITIZER_STRINGIFY(XRAY_FDR_OPTIONS); +#else + return ""; +#endif +} + +} // namespace __xray diff --git a/lib/xray/xray_fdr_flags.h b/lib/xray/xray_fdr_flags.h new file mode 100644 index 000000000000..9c953f1cabcf --- /dev/null +++ b/lib/xray/xray_fdr_flags.h @@ -0,0 +1,38 @@ +//===-- xray_fdr_flags.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// This file defines the flags for the flight-data-recorder mode implementation. 
+// +//===----------------------------------------------------------------------===// +#ifndef XRAY_FDR_FLAGS_H +#define XRAY_FDR_FLAGS_H + +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __xray { + +struct FDRFlags { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "xray_fdr_flags.inc" +#undef XRAY_FLAG + + void setDefaults(); +}; + +extern FDRFlags xray_fdr_flags_dont_use_directly; +extern void registerXRayFDRFlags(FlagParser *P, FDRFlags *F); +const char *useCompilerDefinedFDRFlags(); +inline FDRFlags *fdrFlags() { return &xray_fdr_flags_dont_use_directly; } + +} // namespace __xray + +#endif // XRAY_FDR_FLAGS_H diff --git a/lib/xray/xray_fdr_flags.inc b/lib/xray/xray_fdr_flags.inc new file mode 100644 index 000000000000..d8721ad12cbe --- /dev/null +++ b/lib/xray/xray_fdr_flags.inc @@ -0,0 +1,29 @@ +//===-- xray_fdr_flags.inc --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// XRay FDR Mode runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef XRAY_FLAG +#error "Define XRAY_FLAG prior to including this file!" +#endif + +// FDR (Flight Data Recorder) Mode logging options. 
+XRAY_FLAG(int, func_duration_threshold_us, 5, + "FDR logging will try to skip functions that execute for fewer " + "microseconds than this threshold.") +XRAY_FLAG(int, grace_period_ms, 100, + "FDR logging will wait this much time in milliseconds before " + "actually flushing the log; this gives a chance for threads to " + "notice that the log has been finalized and clean up.") +XRAY_FLAG(int, buffer_size, 16384, + "Size of buffers in the circular buffer queue.") +XRAY_FLAG(int, buffer_max, 100, "Maximum number of buffers in the queue.") +XRAY_FLAG(bool, no_file_flush, false, + "Set to true to not write log files by default.") diff --git a/lib/xray/xray_fdr_log_records.h b/lib/xray/xray_fdr_log_records.h index 324208db82ca..87096d4fc29e 100644 --- a/lib/xray/xray_fdr_log_records.h +++ b/lib/xray/xray_fdr_log_records.h @@ -32,6 +32,8 @@ struct alignas(16) MetadataRecord { CustomEventMarker, CallArgument, BufferExtents, + TypedEventMarker, + Pid, }; // Use 7 bits to identify this record type. 
diff --git a/lib/xray/xray_fdr_logging.cc b/lib/xray/xray_fdr_logging.cc index 1bfa10c21f5c..6cb2dfa0c658 100644 --- a/lib/xray/xray_fdr_logging.cc +++ b/lib/xray/xray_fdr_logging.cc @@ -15,64 +15,836 @@ // //===----------------------------------------------------------------------===// #include "xray_fdr_logging.h" +#include <cassert> #include <errno.h> +#include <limits> +#include <memory> +#include <pthread.h> #include <sys/syscall.h> #include <sys/time.h> #include <time.h> #include <unistd.h> +#include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "xray/xray_interface.h" #include "xray/xray_records.h" #include "xray_buffer_queue.h" #include "xray_defs.h" -#include "xray_fdr_logging_impl.h" +#include "xray_fdr_flags.h" #include "xray_flags.h" +#include "xray_recursion_guard.h" #include "xray_tsc.h" #include "xray_utils.h" namespace __xray { +atomic_sint32_t LoggingStatus = {XRayLogInitStatus::XRAY_LOG_UNINITIALIZED}; + +// Group together thread-local-data in a struct, then hide it behind a function +// call so that it can be initialized on first use instead of as a global. We +// force the alignment to 64-bytes for x86 cache line alignment, as this +// structure is used in the hot path of implementation. +struct alignas(64) ThreadLocalData { + BufferQueue::Buffer Buffer; + char *RecordPtr = nullptr; + // The number of FunctionEntry records immediately preceding RecordPtr. + uint8_t NumConsecutiveFnEnters = 0; + + // The number of adjacent, consecutive pairs of FunctionEntry, Tail Exit + // records preceding RecordPtr. + uint8_t NumTailCalls = 0; + + // We use a thread_local variable to keep track of which CPUs we've already + // run, and the TSC times for these CPUs. This allows us to stop repeating the + // CPU field in the function records. + // + // We assume that we'll support only 65536 CPUs for x86_64. 
+ uint16_t CurrentCPU = std::numeric_limits<uint16_t>::max(); + uint64_t LastTSC = 0; + uint64_t LastFunctionEntryTSC = 0; + + // Make sure a thread that's ever called handleArg0 has a thread-local + // live reference to the buffer queue for this particular instance of + // FDRLogging, and that we're going to clean it up when the thread exits. + BufferQueue *BQ = nullptr; +}; + +static_assert(std::is_trivially_destructible<ThreadLocalData>::value, + "ThreadLocalData must be trivially destructible"); + +static constexpr auto MetadataRecSize = sizeof(MetadataRecord); +static constexpr auto FunctionRecSize = sizeof(FunctionRecord); + +// Use a global pthread key to identify thread-local data for logging. +static pthread_key_t Key; + // Global BufferQueue. -BufferQueue *BQ = nullptr; +static BufferQueue *BQ = nullptr; -__sanitizer::atomic_sint32_t LogFlushStatus = { +static atomic_sint32_t LogFlushStatus = { XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING}; -FDRLoggingOptions FDROptions; +static FDRLoggingOptions FDROptions; + +static SpinMutex FDROptionsMutex; + +// This function will initialize the thread-local data structure used by the FDR +// logging implementation and return a reference to it. The implementation +// details require a bit of care to maintain. +// +// First, some requirements on the implementation in general: +// +// - XRay handlers should not call any memory allocation routines that may +// delegate to an instrumented implementation. This means functions like +// malloc() and free() should not be called while instrumenting. +// +// - We would like to use some thread-local data initialized on first-use of +// the XRay instrumentation. These allow us to implement unsynchronized +// routines that access resources associated with the thread. +// +// The implementation here uses a few mechanisms that allow us to provide both +// the requirements listed above. We do this by: +// +// 1. 
Using a thread-local aligned storage buffer for representing the +// ThreadLocalData struct. This data will be uninitialized memory by +// design. +// +// 2. Not requiring a thread exit handler/implementation, keeping the +// thread-local as purely a collection of references/data that do not +// require cleanup. +// +// We're doing this to avoid using a `thread_local` object that has a +// non-trivial destructor, because the C++ runtime might call std::malloc(...) +// to register calls to destructors. Deadlocks may arise when, for example, an +// externally provided malloc implementation is XRay instrumented, and +// initializing the thread-locals involves calling into malloc. A malloc +// implementation that does global synchronization might be holding a lock for a +// critical section, calling a function that might be XRay instrumented (and +// thus in turn calling into malloc by virtue of registration of the +// thread_local's destructor). +static_assert(alignof(ThreadLocalData) >= 64, + "ThreadLocalData must be cache line aligned."); +static ThreadLocalData &getThreadLocalData() { + thread_local typename std::aligned_storage< + sizeof(ThreadLocalData), alignof(ThreadLocalData)>::type TLDStorage{}; + + if (pthread_getspecific(Key) == NULL) { + new (reinterpret_cast<ThreadLocalData *>(&TLDStorage)) ThreadLocalData{}; + pthread_setspecific(Key, &TLDStorage); + } + + return *reinterpret_cast<ThreadLocalData *>(&TLDStorage); +} + +static void writeNewBufferPreamble(tid_t Tid, timespec TS, + pid_t Pid) XRAY_NEVER_INSTRUMENT { + static constexpr int InitRecordsCount = 3; + auto &TLD = getThreadLocalData(); + MetadataRecord Metadata[InitRecordsCount]; + { + // Write out a MetadataRecord to signify that this is the start of a new + // buffer, associated with a particular thread, with a new CPU. For the + // data, we have 15 bytes to squeeze as much information as we can. 
At this + // point we only write down the following bytes: + // - Thread ID (tid_t, cast to 4 bytes type due to Darwin being 8 bytes) + auto &NewBuffer = Metadata[0]; + NewBuffer.Type = uint8_t(RecordType::Metadata); + NewBuffer.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewBuffer); + int32_t tid = static_cast<int32_t>(Tid); + internal_memcpy(&NewBuffer.Data, &tid, sizeof(tid)); + } + + // Also write the WalltimeMarker record. + { + static_assert(sizeof(time_t) <= 8, "time_t needs to be at most 8 bytes"); + auto &WalltimeMarker = Metadata[1]; + WalltimeMarker.Type = uint8_t(RecordType::Metadata); + WalltimeMarker.RecordKind = + uint8_t(MetadataRecord::RecordKinds::WalltimeMarker); + + // We only really need microsecond precision here, and enforce across + // platforms that we need 64-bit seconds and 32-bit microseconds encoded in + // the Metadata record. + int32_t Micros = TS.tv_nsec / 1000; + int64_t Seconds = TS.tv_sec; + internal_memcpy(WalltimeMarker.Data, &Seconds, sizeof(Seconds)); + internal_memcpy(WalltimeMarker.Data + sizeof(Seconds), &Micros, + sizeof(Micros)); + } + + // Also write the Pid record. + { + // Write out a MetadataRecord that contains the current pid + auto &PidMetadata = Metadata[2]; + PidMetadata.Type = uint8_t(RecordType::Metadata); + PidMetadata.RecordKind = uint8_t(MetadataRecord::RecordKinds::Pid); + int32_t pid = static_cast<int32_t>(Pid); + internal_memcpy(&PidMetadata.Data, &pid, sizeof(pid)); + } + + TLD.NumConsecutiveFnEnters = 0; + TLD.NumTailCalls = 0; + if (TLD.BQ == nullptr || TLD.BQ->finalizing()) + return; + internal_memcpy(TLD.RecordPtr, Metadata, sizeof(Metadata)); + TLD.RecordPtr += sizeof(Metadata); + // Since we write out the extents as the first metadata record of the + // buffer, we need to write out the extents including the extents record. 
+ atomic_store(&TLD.Buffer.Extents->Size, sizeof(Metadata), + memory_order_release); +} + +static void setupNewBuffer(int (*wall_clock_reader)( + clockid_t, struct timespec *)) XRAY_NEVER_INSTRUMENT { + auto &TLD = getThreadLocalData(); + auto &B = TLD.Buffer; + TLD.RecordPtr = static_cast<char *>(B.Data); + tid_t Tid = GetTid(); + timespec TS{0, 0}; + pid_t Pid = internal_getpid(); + // This is typically clock_gettime, but callers have injection ability. + wall_clock_reader(CLOCK_MONOTONIC, &TS); + writeNewBufferPreamble(Tid, TS, Pid); + TLD.NumConsecutiveFnEnters = 0; + TLD.NumTailCalls = 0; +} + +static void incrementExtents(size_t Add) { + auto &TLD = getThreadLocalData(); + atomic_fetch_add(&TLD.Buffer.Extents->Size, Add, memory_order_acq_rel); +} + +static void decrementExtents(size_t Subtract) { + auto &TLD = getThreadLocalData(); + atomic_fetch_sub(&TLD.Buffer.Extents->Size, Subtract, memory_order_acq_rel); +} + +static void writeNewCPUIdMetadata(uint16_t CPU, + uint64_t TSC) XRAY_NEVER_INSTRUMENT { + auto &TLD = getThreadLocalData(); + MetadataRecord NewCPUId; + NewCPUId.Type = uint8_t(RecordType::Metadata); + NewCPUId.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewCPUId); + + // The data for the New CPU will contain the following bytes: + // - CPU ID (uint16_t, 2 bytes) + // - Full TSC (uint64_t, 8 bytes) + // Total = 10 bytes. 
+ internal_memcpy(&NewCPUId.Data, &CPU, sizeof(CPU)); + internal_memcpy(&NewCPUId.Data[sizeof(CPU)], &TSC, sizeof(TSC)); + internal_memcpy(TLD.RecordPtr, &NewCPUId, sizeof(MetadataRecord)); + TLD.RecordPtr += sizeof(MetadataRecord); + TLD.NumConsecutiveFnEnters = 0; + TLD.NumTailCalls = 0; + incrementExtents(sizeof(MetadataRecord)); +} + +static void writeTSCWrapMetadata(uint64_t TSC) XRAY_NEVER_INSTRUMENT { + auto &TLD = getThreadLocalData(); + MetadataRecord TSCWrap; + TSCWrap.Type = uint8_t(RecordType::Metadata); + TSCWrap.RecordKind = uint8_t(MetadataRecord::RecordKinds::TSCWrap); + + // The data for the TSCWrap record contains the following bytes: + // - Full TSC (uint64_t, 8 bytes) + // Total = 8 bytes. + internal_memcpy(&TSCWrap.Data, &TSC, sizeof(TSC)); + internal_memcpy(TLD.RecordPtr, &TSCWrap, sizeof(MetadataRecord)); + TLD.RecordPtr += sizeof(MetadataRecord); + TLD.NumConsecutiveFnEnters = 0; + TLD.NumTailCalls = 0; + incrementExtents(sizeof(MetadataRecord)); +} + +// Call Argument metadata records store the arguments to a function in the +// order of their appearance; holes are not supported by the buffer format. +static void writeCallArgumentMetadata(uint64_t A) XRAY_NEVER_INSTRUMENT { + auto &TLD = getThreadLocalData(); + MetadataRecord CallArg; + CallArg.Type = uint8_t(RecordType::Metadata); + CallArg.RecordKind = uint8_t(MetadataRecord::RecordKinds::CallArgument); + + internal_memcpy(CallArg.Data, &A, sizeof(A)); + internal_memcpy(TLD.RecordPtr, &CallArg, sizeof(MetadataRecord)); + TLD.RecordPtr += sizeof(MetadataRecord); + incrementExtents(sizeof(MetadataRecord)); +} + +static void writeFunctionRecord(int FuncId, uint32_t TSCDelta, + XRayEntryType EntryType) XRAY_NEVER_INSTRUMENT { + FunctionRecord FuncRecord; + FuncRecord.Type = uint8_t(RecordType::Function); + // Only take 28 bits of the function id. 
+ FuncRecord.FuncId = FuncId & ~(0x0F << 28); + FuncRecord.TSCDelta = TSCDelta; + + auto &TLD = getThreadLocalData(); + switch (EntryType) { + case XRayEntryType::ENTRY: + ++TLD.NumConsecutiveFnEnters; + FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter); + break; + case XRayEntryType::LOG_ARGS_ENTRY: + // We should not rewind functions with logged args. + TLD.NumConsecutiveFnEnters = 0; + TLD.NumTailCalls = 0; + FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter); + break; + case XRayEntryType::EXIT: + // If we've decided to log the function exit, we will never erase the log + // before it. + TLD.NumConsecutiveFnEnters = 0; + TLD.NumTailCalls = 0; + FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionExit); + break; + case XRayEntryType::TAIL: + // If we just entered the function we're tail exiting from or erased every + // invocation since then, this function entry tail pair is a candidate to + // be erased when the child function exits. + if (TLD.NumConsecutiveFnEnters > 0) { + ++TLD.NumTailCalls; + TLD.NumConsecutiveFnEnters = 0; + } else { + // We will never be able to erase this tail call since we have logged + // something in between the function entry and tail exit. + TLD.NumTailCalls = 0; + TLD.NumConsecutiveFnEnters = 0; + } + FuncRecord.RecordKind = + uint8_t(FunctionRecord::RecordKinds::FunctionTailExit); + break; + case XRayEntryType::CUSTOM_EVENT: { + // This is a bug in patching, so we'll report it once and move on. 
+ static atomic_uint8_t ErrorLatch{0}; + if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel)) + Report("Internal error: patched an XRay custom event call as a function; " + "func id = %d\n", + FuncId); + return; + } + case XRayEntryType::TYPED_EVENT: { + static atomic_uint8_t ErrorLatch{0}; + if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel)) + Report("Internal error: patched an XRay typed event call as a function; " + "func id = %d\n", + FuncId); + return; + } + } + + internal_memcpy(TLD.RecordPtr, &FuncRecord, sizeof(FunctionRecord)); + TLD.RecordPtr += sizeof(FunctionRecord); + incrementExtents(sizeof(FunctionRecord)); +} + +static atomic_uint64_t TicksPerSec{0}; +static atomic_uint64_t ThresholdTicks{0}; + +// Re-point the thread local pointer into this thread's Buffer before the recent +// "Function Entry" record and any "Tail Call Exit" records after that. +static void rewindRecentCall(uint64_t TSC, uint64_t &LastTSC, + uint64_t &LastFunctionEntryTSC, int32_t FuncId) { + auto &TLD = getThreadLocalData(); + TLD.RecordPtr -= FunctionRecSize; + decrementExtents(FunctionRecSize); + FunctionRecord FuncRecord; + internal_memcpy(&FuncRecord, TLD.RecordPtr, FunctionRecSize); + DCHECK(FuncRecord.RecordKind == + uint8_t(FunctionRecord::RecordKinds::FunctionEnter) && + "Expected to find function entry recording when rewinding."); + DCHECK(FuncRecord.FuncId == (FuncId & ~(0x0F << 28)) && + "Expected matching function id when rewinding Exit"); + --TLD.NumConsecutiveFnEnters; + LastTSC -= FuncRecord.TSCDelta; + + // We unwound one call. Update the state and return without writing a log. + if (TLD.NumConsecutiveFnEnters != 0) { + LastFunctionEntryTSC -= FuncRecord.TSCDelta; + return; + } + + // Otherwise we've rewound the stack of all function entries, we might be + // able to rewind further by erasing tail call functions that are being + // exited from via this exit. 
+ LastFunctionEntryTSC = 0; + auto RewindingTSC = LastTSC; + auto RewindingRecordPtr = TLD.RecordPtr - FunctionRecSize; + while (TLD.NumTailCalls > 0) { + // Rewind the TSC back over the TAIL EXIT record. + FunctionRecord ExpectedTailExit; + internal_memcpy(&ExpectedTailExit, RewindingRecordPtr, FunctionRecSize); + + DCHECK(ExpectedTailExit.RecordKind == + uint8_t(FunctionRecord::RecordKinds::FunctionTailExit) && + "Expected to find tail exit when rewinding."); + RewindingRecordPtr -= FunctionRecSize; + RewindingTSC -= ExpectedTailExit.TSCDelta; + FunctionRecord ExpectedFunctionEntry; + internal_memcpy(&ExpectedFunctionEntry, RewindingRecordPtr, + FunctionRecSize); + DCHECK(ExpectedFunctionEntry.RecordKind == + uint8_t(FunctionRecord::RecordKinds::FunctionEnter) && + "Expected to find function entry when rewinding tail call."); + DCHECK(ExpectedFunctionEntry.FuncId == ExpectedTailExit.FuncId && + "Expected funcids to match when rewinding tail call."); + + // This tail call exceeded the threshold duration. It will not be erased. + if ((TSC - RewindingTSC) >= atomic_load_relaxed(&ThresholdTicks)) { + TLD.NumTailCalls = 0; + return; + } + + // We can erase a tail exit pair that we're exiting through since + // its duration is under threshold. 
+ --TLD.NumTailCalls; + RewindingRecordPtr -= FunctionRecSize; + RewindingTSC -= ExpectedFunctionEntry.TSCDelta; + TLD.RecordPtr -= 2 * FunctionRecSize; + LastTSC = RewindingTSC; + decrementExtents(2 * FunctionRecSize); + } +} + +static bool releaseThreadLocalBuffer(BufferQueue &BQArg) { + auto &TLD = getThreadLocalData(); + auto EC = BQArg.releaseBuffer(TLD.Buffer); + if (EC != BufferQueue::ErrorCode::Ok) { + Report("Failed to release buffer at %p; error=%s\n", TLD.Buffer.Data, + BufferQueue::getErrorString(EC)); + return false; + } + return true; +} + +static bool prepareBuffer(uint64_t TSC, unsigned char CPU, + int (*wall_clock_reader)(clockid_t, + struct timespec *), + size_t MaxSize) XRAY_NEVER_INSTRUMENT { + auto &TLD = getThreadLocalData(); + char *BufferStart = static_cast<char *>(TLD.Buffer.Data); + if ((TLD.RecordPtr + MaxSize) > (BufferStart + TLD.Buffer.Size)) { + if (!releaseThreadLocalBuffer(*TLD.BQ)) + return false; + auto EC = TLD.BQ->getBuffer(TLD.Buffer); + if (EC != BufferQueue::ErrorCode::Ok) { + Report("Failed to prepare a buffer; error = '%s'\n", + BufferQueue::getErrorString(EC)); + return false; + } + setupNewBuffer(wall_clock_reader); -__sanitizer::SpinMutex FDROptionsMutex; + // Always write the CPU metadata as the first record in the buffer. + writeNewCPUIdMetadata(CPU, TSC); + } + return true; +} + +static bool +isLogInitializedAndReady(BufferQueue *LBQ, uint64_t TSC, unsigned char CPU, + int (*wall_clock_reader)(clockid_t, struct timespec *)) + XRAY_NEVER_INSTRUMENT { + // Bail out right away if logging is not initialized yet. + // We should take the opportunity to release the buffer though. 
+ auto Status = atomic_load(&LoggingStatus, memory_order_acquire); + auto &TLD = getThreadLocalData(); + if (Status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) { + if (TLD.RecordPtr != nullptr && + (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING || + Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)) { + if (!releaseThreadLocalBuffer(*LBQ)) + return false; + TLD.RecordPtr = nullptr; + return false; + } + return false; + } + + if (atomic_load(&LoggingStatus, memory_order_acquire) != + XRayLogInitStatus::XRAY_LOG_INITIALIZED || + LBQ->finalizing()) { + if (!releaseThreadLocalBuffer(*LBQ)) + return false; + TLD.RecordPtr = nullptr; + } + + if (TLD.Buffer.Data == nullptr) { + auto EC = LBQ->getBuffer(TLD.Buffer); + if (EC != BufferQueue::ErrorCode::Ok) { + auto LS = atomic_load(&LoggingStatus, memory_order_acquire); + if (LS != XRayLogInitStatus::XRAY_LOG_FINALIZING && + LS != XRayLogInitStatus::XRAY_LOG_FINALIZED) + Report("Failed to acquire a buffer; error = '%s'\n", + BufferQueue::getErrorString(EC)); + return false; + } + + setupNewBuffer(wall_clock_reader); + + // Always write the CPU metadata as the first record in the buffer. + writeNewCPUIdMetadata(CPU, TSC); + } + + if (TLD.CurrentCPU == std::numeric_limits<uint16_t>::max()) { + // This means this is the first CPU this thread has ever run on. We set + // the current CPU and record this as the first TSC we've seen. + TLD.CurrentCPU = CPU; + writeNewCPUIdMetadata(CPU, TSC); + } + + return true; +} + +// Compute the TSC difference between the time of measurement and the previous +// event. There are a few interesting situations we need to account for: +// +// - The thread has migrated to a different CPU. If this is the case, then +// we write down the following records: +// +// 1. A 'NewCPUId' Metadata record. +// 2. A FunctionRecord with a 0 for the TSCDelta field. +// +// - The TSC delta is greater than the 32 bits we can store in a +// FunctionRecord. 
In this case we write down the following records: +// +// 1. A 'TSCWrap' Metadata record. +// 2. A FunctionRecord with a 0 for the TSCDelta field. +// +// - The TSC delta is representable within the 32 bits we can store in a +// FunctionRecord. In this case we write down just a FunctionRecord with +// the correct TSC delta. +static uint32_t writeCurrentCPUTSC(ThreadLocalData &TLD, uint64_t TSC, + uint8_t CPU) { + if (CPU != TLD.CurrentCPU) { + // We've moved to a new CPU. + writeNewCPUIdMetadata(CPU, TSC); + return 0; + } + // If the delta is greater than the range for a uint32_t, then we write out + // the TSC wrap metadata entry with the full TSC, and the TSC for the + // function record be 0. + uint64_t Delta = TSC - TLD.LastTSC; + if (Delta <= std::numeric_limits<uint32_t>::max()) + return Delta; + + writeTSCWrapMetadata(TSC); + return 0; +} + +static void endBufferIfFull() XRAY_NEVER_INSTRUMENT { + auto &TLD = getThreadLocalData(); + auto BufferStart = static_cast<char *>(TLD.Buffer.Data); + if ((TLD.RecordPtr + MetadataRecSize) - BufferStart <= + ptrdiff_t{MetadataRecSize}) { + if (!releaseThreadLocalBuffer(*TLD.BQ)) + return; + TLD.RecordPtr = nullptr; + } +} + +thread_local atomic_uint8_t Running{0}; + +/// Here's where the meat of the processing happens. The writer captures +/// function entry, exit and tail exit points with a time and will create +/// TSCWrap, NewCPUId and Function records as necessary. The writer might +/// walk backward through its buffer and erase trivial functions to avoid +/// polluting the log and may use the buffer queue to obtain or release a +/// buffer. 
+static void processFunctionHook(int32_t FuncId, XRayEntryType Entry, + uint64_t TSC, unsigned char CPU, uint64_t Arg1, + int (*wall_clock_reader)(clockid_t, + struct timespec *)) + XRAY_NEVER_INSTRUMENT { + __asm volatile("# LLVM-MCA-BEGIN processFunctionHook"); + // Prevent signal handler recursion, so in case we're already in a log writing + // mode and the signal handler comes in (and is also instrumented) then we + // don't want to be clobbering potentially partial writes already happening in + // the thread. We use a simple thread_local latch to only allow one on-going + // handleArg0 to happen at any given time. + RecursionGuard Guard{Running}; + if (!Guard) { + DCHECK(atomic_load_relaxed(&Running) && "RecursionGuard is buggy!"); + return; + } + + auto &TLD = getThreadLocalData(); + + if (TLD.BQ == nullptr) + TLD.BQ = BQ; + + if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, wall_clock_reader)) + return; + + // Before we go setting up writing new function entries, we need to be really + // careful about the pointer math we're doing. This means we need to ensure + // that the record we are about to write is going to fit into the buffer, + // without overflowing the buffer. + // + // To do this properly, we use the following assumptions: + // + // - The least number of bytes we will ever write is 8 + // (sizeof(FunctionRecord)) only if the delta between the previous entry + // and this entry is within 32 bits. + // - The most number of bytes we will ever write is 8 + 16 + 16 = 40. + // This is computed by: + // + // MaxSize = sizeof(FunctionRecord) + 2 * sizeof(MetadataRecord) + // + // These arise in the following cases: + // + // 1. When the delta between the TSC we get and the previous TSC for the + // same CPU is outside of the uint32_t range, we end up having to + // write a MetadataRecord to indicate a "tsc wrap" before the actual + // FunctionRecord. + // 2. 
When we learn that we've moved CPUs, we need to write a + // MetadataRecord to indicate a "cpu change", and thus write out the + // current TSC for that CPU before writing out the actual + // FunctionRecord. + // 3. When we learn about a new CPU ID, we need to write down a "new cpu + // id" MetadataRecord before writing out the actual FunctionRecord. + // 4. The second MetadataRecord is the optional function call argument. + // + // So the math we need to do is to determine whether writing 40 bytes past the + // current pointer exceeds the buffer's maximum size. If we don't have enough + // space to write 40 bytes in the buffer, we need get a new Buffer, set it up + // properly before doing any further writing. + size_t MaxSize = FunctionRecSize + 2 * MetadataRecSize; + if (!prepareBuffer(TSC, CPU, wall_clock_reader, MaxSize)) { + TLD.BQ = nullptr; + return; + } + + // By this point, we are now ready to write up to 40 bytes (explained above). + DCHECK((TLD.RecordPtr + MaxSize) - static_cast<char *>(TLD.Buffer.Data) >= + static_cast<ptrdiff_t>(MetadataRecSize) && + "Misconfigured BufferQueue provided; Buffer size not large enough."); + + auto RecordTSCDelta = writeCurrentCPUTSC(TLD, TSC, CPU); + TLD.LastTSC = TSC; + TLD.CurrentCPU = CPU; + switch (Entry) { + case XRayEntryType::ENTRY: + case XRayEntryType::LOG_ARGS_ENTRY: + // Update the thread local state for the next invocation. + TLD.LastFunctionEntryTSC = TSC; + break; + case XRayEntryType::TAIL: + case XRayEntryType::EXIT: + // Break out and write the exit record if we can't erase any functions. + if (TLD.NumConsecutiveFnEnters == 0 || + (TSC - TLD.LastFunctionEntryTSC) >= + atomic_load_relaxed(&ThresholdTicks)) + break; + rewindRecentCall(TSC, TLD.LastTSC, TLD.LastFunctionEntryTSC, FuncId); + return; // without writing log. + case XRayEntryType::CUSTOM_EVENT: { + // This is a bug in patching, so we'll report it once and move on. 
+ static atomic_uint8_t ErrorLatch{0}; + if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel)) + Report("Internal error: patched an XRay custom event call as a function; " + "func id = %d\n", + FuncId); + return; + } + case XRayEntryType::TYPED_EVENT: { + static atomic_uint8_t ErrorLatch{0}; + if (!atomic_exchange(&ErrorLatch, 1, memory_order_acq_rel)) + Report("Internal error: patched an XRay typed event call as a function; " + "func id = %d\n", + FuncId); + return; + } + } + + writeFunctionRecord(FuncId, RecordTSCDelta, Entry); + if (Entry == XRayEntryType::LOG_ARGS_ENTRY) + writeCallArgumentMetadata(Arg1); + + // If we've exhausted the buffer by this time, we then release the buffer to + // make sure that other threads may start using this buffer. + endBufferIfFull(); + __asm volatile("# LLVM-MCA-END"); +} + +static XRayFileHeader &fdrCommonHeaderInfo() { + static std::aligned_storage<sizeof(XRayFileHeader)>::type HStorage; + static pthread_once_t OnceInit = PTHREAD_ONCE_INIT; + static bool TSCSupported = true; + static uint64_t CycleFrequency = NanosecondsPerSecond; + pthread_once(&OnceInit, +[] { + XRayFileHeader &H = reinterpret_cast<XRayFileHeader &>(HStorage); + // Version 2 of the log writes the extents of the buffer, instead of + // relying on an end-of-buffer record. + // Version 3 includes PID metadata record + H.Version = 3; + H.Type = FileTypes::FDR_LOG; + + // Test for required CPU features and cache the cycle frequency + TSCSupported = probeRequiredCPUFeatures(); + if (TSCSupported) + CycleFrequency = getTSCFrequency(); + H.CycleFrequency = CycleFrequency; + + // FIXME: Actually check whether we have 'constant_tsc' and + // 'nonstop_tsc' before setting the values in the header. + H.ConstantTSC = 1; + H.NonstopTSC = 1; + }); + return reinterpret_cast<XRayFileHeader &>(HStorage); +} + +// This is the iterator implementation, which knows how to handle FDR-mode +// specific buffers. 
This is used as an implementation of the iterator function +// needed by __xray_set_buffer_iterator(...). It maintains a global state of the +// buffer iteration for the currently installed FDR mode buffers. In particular: +// +// - If the argument represents the initial state of XRayBuffer ({nullptr, 0}) +// then the iterator returns the header information. +// - If the argument represents the header information ({address of header +// info, size of the header info}) then it returns the first FDR buffer's +// address and extents. +// - It will keep returning the next buffer and extents as there are more +// buffers to process. When the input represents the last buffer, it will +// return the initial state to signal completion ({nullptr, 0}). +// +// See xray/xray_log_interface.h for more details on the requirements for the +// implementations of __xray_set_buffer_iterator(...) and +// __xray_log_process_buffers(...). +XRayBuffer fdrIterator(const XRayBuffer B) { + DCHECK(internal_strcmp(__xray_log_get_current_mode(), "xray-fdr") == 0); + DCHECK(BQ->finalizing()); + + if (BQ == nullptr || !BQ->finalizing()) { + if (Verbosity()) + Report( + "XRay FDR: Failed global buffer queue is null or not finalizing!\n"); + return {nullptr, 0}; + } + + // We use a global scratch-pad for the header information, which only gets + // initialized the first time this function is called. We'll update one part + // of this information with some relevant data (in particular the number of + // buffers to expect). + static std::aligned_storage<sizeof(XRayFileHeader)>::type HeaderStorage; + static pthread_once_t HeaderOnce = PTHREAD_ONCE_INIT; + pthread_once(&HeaderOnce, +[] { + reinterpret_cast<XRayFileHeader &>(HeaderStorage) = fdrCommonHeaderInfo(); + }); + + // We use a convenience alias for code referring to Header from here on out. 
+ auto &Header = reinterpret_cast<XRayFileHeader &>(HeaderStorage); + if (B.Data == nullptr && B.Size == 0) { + Header.FdrData = FdrAdditionalHeaderData{BQ->ConfiguredBufferSize()}; + return XRayBuffer{static_cast<void *>(&Header), sizeof(Header)}; + } + + static BufferQueue::const_iterator It{}; + static BufferQueue::const_iterator End{}; + static void *CurrentBuffer{nullptr}; + if (B.Data == static_cast<void *>(&Header) && B.Size == sizeof(Header)) { + // From this point on, we provide raw access to the raw buffer we're getting + // from the BufferQueue. We're relying on the iterators from the current + // Buffer queue. + It = BQ->cbegin(); + End = BQ->cend(); + } + + if (CurrentBuffer != nullptr) { + InternalFree(CurrentBuffer); + CurrentBuffer = nullptr; + } + + if (It == End) + return {nullptr, 0}; + + // Set up the current buffer to contain the extents like we would when writing + // out to disk. The difference here would be that we still write "empty" + // buffers, or at least go through the iterators faithfully to let the + // handlers see the empty buffers in the queue. + auto BufferSize = atomic_load(&It->Extents->Size, memory_order_acquire); + auto SerializedBufferSize = BufferSize + sizeof(MetadataRecord); + CurrentBuffer = InternalAlloc(SerializedBufferSize); + if (CurrentBuffer == nullptr) + return {nullptr, 0}; + + // Write out the extents as a Metadata Record into the CurrentBuffer. 
+ MetadataRecord ExtentsRecord; + ExtentsRecord.Type = uint8_t(RecordType::Metadata); + ExtentsRecord.RecordKind = + uint8_t(MetadataRecord::RecordKinds::BufferExtents); + internal_memcpy(ExtentsRecord.Data, &BufferSize, sizeof(BufferSize)); + auto AfterExtents = + static_cast<char *>(internal_memcpy(CurrentBuffer, &ExtentsRecord, + sizeof(MetadataRecord))) + + sizeof(MetadataRecord); + internal_memcpy(AfterExtents, It->Data, BufferSize); + + XRayBuffer Result; + Result.Data = CurrentBuffer; + Result.Size = SerializedBufferSize; + ++It; + return Result; +} // Must finalize before flushing. XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT { - if (__sanitizer::atomic_load(&LoggingStatus, - __sanitizer::memory_order_acquire) != + if (atomic_load(&LoggingStatus, memory_order_acquire) != XRayLogInitStatus::XRAY_LOG_FINALIZED) { - if (__sanitizer::Verbosity()) + if (Verbosity()) Report("Not flushing log, implementation is not finalized.\n"); return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; } s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; - if (!__sanitizer::atomic_compare_exchange_strong( - &LogFlushStatus, &Result, XRayLogFlushStatus::XRAY_LOG_FLUSHING, - __sanitizer::memory_order_release)) { - - if (__sanitizer::Verbosity()) + if (!atomic_compare_exchange_strong(&LogFlushStatus, &Result, + XRayLogFlushStatus::XRAY_LOG_FLUSHING, + memory_order_release)) { + if (Verbosity()) Report("Not flushing log, implementation is still finalizing.\n"); return static_cast<XRayLogFlushStatus>(Result); } if (BQ == nullptr) { - if (__sanitizer::Verbosity()) + if (Verbosity()) Report("Cannot flush when global buffer queue is null.\n"); return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; } // We wait a number of milliseconds to allow threads to see that we've // finalised before attempting to flush the log. 
- __sanitizer::SleepForMillis(flags()->xray_fdr_log_grace_period_ms); + SleepForMillis(fdrFlags()->grace_period_ms); + + // At this point, we're going to uninstall the iterator implementation, before + // we decide to do anything further with the global buffer queue. + __xray_log_remove_buffer_iterator(); + + // Once flushed, we should set the global status of the logging implementation + // to "uninitialized" to allow for FDR-logging multiple runs. + auto ResetToUnitialized = at_scope_exit([] { + atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_UNINITIALIZED, + memory_order_release); + }); + + auto CleanupBuffers = at_scope_exit([] { + if (BQ != nullptr) { + auto &TLD = getThreadLocalData(); + if (TLD.RecordPtr != nullptr && TLD.BQ != nullptr) + releaseThreadLocalBuffer(*TLD.BQ); + BQ->~BufferQueue(); + InternalFree(BQ); + BQ = nullptr; + } + }); + + if (fdrFlags()->no_file_flush) { + if (Verbosity()) + Report("XRay FDR: Not flushing to file, 'no_file_flush=true'.\n"); + + atomic_store(&LogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED, + memory_order_release); + return XRayLogFlushStatus::XRAY_LOG_FLUSHED; + } // We write out the file in the following format: // @@ -85,35 +857,20 @@ XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT { // int Fd = -1; { - __sanitizer::SpinMutexLock Guard(&FDROptionsMutex); + // FIXME: Remove this section of the code, when we remove the struct-based + // configuration API. + SpinMutexLock Guard(&FDROptionsMutex); Fd = FDROptions.Fd; } if (Fd == -1) Fd = getLogFD(); if (Fd == -1) { auto Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; - __sanitizer::atomic_store(&LogFlushStatus, Result, - __sanitizer::memory_order_release); + atomic_store(&LogFlushStatus, Result, memory_order_release); return Result; } - // Test for required CPU features and cache the cycle frequency - static bool TSCSupported = probeRequiredCPUFeatures(); - static uint64_t CycleFrequency = - TSCSupported ? 
getTSCFrequency() : __xray::NanosecondsPerSecond; - - XRayFileHeader Header; - - // Version 2 of the log writes the extents of the buffer, instead of relying - // on an end-of-buffer record. - Header.Version = 2; - Header.Type = FileTypes::FDR_LOG; - Header.CycleFrequency = CycleFrequency; - - // FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc' - // before setting the values in the header. - Header.ConstantTSC = 1; - Header.NonstopTSC = 1; + XRayFileHeader Header = fdrCommonHeaderInfo(); Header.FdrData = FdrAdditionalHeaderData{BQ->ConfiguredBufferSize()}; retryingWriteAll(Fd, reinterpret_cast<char *>(&Header), reinterpret_cast<char *>(&Header) + sizeof(Header)); @@ -121,39 +878,36 @@ XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT { BQ->apply([&](const BufferQueue::Buffer &B) { // Starting at version 2 of the FDR logging implementation, we only write // the records identified by the extents of the buffer. We use the Extents - // from the Buffer and write that out as the first record in the buffer. - // We still use a Metadata record, but fill in the extents instead for the + // from the Buffer and write that out as the first record in the buffer. We + // still use a Metadata record, but fill in the extents instead for the // data. 
MetadataRecord ExtentsRecord; - auto BufferExtents = __sanitizer::atomic_load( - &B.Extents->Size, __sanitizer::memory_order_acquire); - assert(BufferExtents <= B.Size); + auto BufferExtents = atomic_load(&B.Extents->Size, memory_order_acquire); + DCHECK(BufferExtents <= B.Size); ExtentsRecord.Type = uint8_t(RecordType::Metadata); ExtentsRecord.RecordKind = uint8_t(MetadataRecord::RecordKinds::BufferExtents); - std::memcpy(ExtentsRecord.Data, &BufferExtents, sizeof(BufferExtents)); + internal_memcpy(ExtentsRecord.Data, &BufferExtents, sizeof(BufferExtents)); if (BufferExtents > 0) { retryingWriteAll(Fd, reinterpret_cast<char *>(&ExtentsRecord), reinterpret_cast<char *>(&ExtentsRecord) + sizeof(MetadataRecord)); - retryingWriteAll(Fd, reinterpret_cast<char *>(B.Buffer), - reinterpret_cast<char *>(B.Buffer) + BufferExtents); + retryingWriteAll(Fd, reinterpret_cast<char *>(B.Data), + reinterpret_cast<char *>(B.Data) + BufferExtents); } }); - __sanitizer::atomic_store(&LogFlushStatus, - XRayLogFlushStatus::XRAY_LOG_FLUSHED, - __sanitizer::memory_order_release); + atomic_store(&LogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED, + memory_order_release); return XRayLogFlushStatus::XRAY_LOG_FLUSHED; } XRayLogInitStatus fdrLoggingFinalize() XRAY_NEVER_INSTRUMENT { s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED; - if (!__sanitizer::atomic_compare_exchange_strong( - &LoggingStatus, &CurrentStatus, - XRayLogInitStatus::XRAY_LOG_FINALIZING, - __sanitizer::memory_order_release)) { - if (__sanitizer::Verbosity()) + if (!atomic_compare_exchange_strong(&LoggingStatus, &CurrentStatus, + XRayLogInitStatus::XRAY_LOG_FINALIZING, + memory_order_release)) { + if (Verbosity()) Report("Cannot finalize log, implementation not initialized.\n"); return static_cast<XRayLogInitStatus>(CurrentStatus); } @@ -162,39 +916,11 @@ XRayLogInitStatus fdrLoggingFinalize() XRAY_NEVER_INSTRUMENT { // operations to be performed until re-initialized. 
BQ->finalize(); - __sanitizer::atomic_store(&LoggingStatus, - XRayLogInitStatus::XRAY_LOG_FINALIZED, - __sanitizer::memory_order_release); + atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED, + memory_order_release); return XRayLogInitStatus::XRAY_LOG_FINALIZED; } -XRayLogInitStatus fdrLoggingReset() XRAY_NEVER_INSTRUMENT { - s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_FINALIZED; - if (__sanitizer::atomic_compare_exchange_strong( - &LoggingStatus, &CurrentStatus, - XRayLogInitStatus::XRAY_LOG_INITIALIZED, - __sanitizer::memory_order_release)) - return static_cast<XRayLogInitStatus>(CurrentStatus); - - // Release the in-memory buffer queue. - delete BQ; - BQ = nullptr; - - // Spin until the flushing status is flushed. - s32 CurrentFlushingStatus = XRayLogFlushStatus::XRAY_LOG_FLUSHED; - while (__sanitizer::atomic_compare_exchange_weak( - &LogFlushStatus, &CurrentFlushingStatus, - XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING, - __sanitizer::memory_order_release)) { - if (CurrentFlushingStatus == XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING) - break; - CurrentFlushingStatus = XRayLogFlushStatus::XRAY_LOG_FLUSHED; - } - - // At this point, we know that the status is flushed, and that we can assume - return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; -} - struct TSCAndCPU { uint64_t TSC = 0; unsigned char CPU = 0; @@ -202,12 +928,14 @@ struct TSCAndCPU { static TSCAndCPU getTimestamp() XRAY_NEVER_INSTRUMENT { // We want to get the TSC as early as possible, so that we can check whether - // we've seen this CPU before. We also do it before we load anything else, to - // allow for forward progress with the scheduling. + // we've seen this CPU before. We also do it before we load anything else, + // to allow for forward progress with the scheduling. 
TSCAndCPU Result; // Test once for required CPU features - static bool TSCSupported = probeRequiredCPUFeatures(); + static pthread_once_t OnceProbe = PTHREAD_ONCE_INIT; + static bool TSCSupported = true; + pthread_once(&OnceProbe, +[] { TSCSupported = probeRequiredCPUFeatures(); }); if (TSCSupported) { Result.TSC = __xray::readTSC(Result.CPU); @@ -228,20 +956,17 @@ static TSCAndCPU getTimestamp() XRAY_NEVER_INSTRUMENT { void fdrLoggingHandleArg0(int32_t FuncId, XRayEntryType Entry) XRAY_NEVER_INSTRUMENT { auto TC = getTimestamp(); - __xray_fdr_internal::processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, 0, - clock_gettime, BQ); + processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, 0, clock_gettime); } void fdrLoggingHandleArg1(int32_t FuncId, XRayEntryType Entry, uint64_t Arg) XRAY_NEVER_INSTRUMENT { auto TC = getTimestamp(); - __xray_fdr_internal::processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, Arg, - clock_gettime, BQ); + processFunctionHook(FuncId, Entry, TC.TSC, TC.CPU, Arg, clock_gettime); } void fdrLoggingHandleCustomEvent(void *Event, std::size_t EventSize) XRAY_NEVER_INSTRUMENT { - using namespace __xray_fdr_internal; auto TC = getTimestamp(); auto &TSC = TC.TSC; auto &CPU = TC.CPU; @@ -249,13 +974,8 @@ void fdrLoggingHandleCustomEvent(void *Event, if (!Guard) return; if (EventSize > std::numeric_limits<int32_t>::max()) { - using Empty = struct {}; - static Empty Once = [&] { - Report("Event size too large = %zu ; > max = %d\n", EventSize, - std::numeric_limits<int32_t>::max()); - return Empty(); - }(); - (void)Once; + static pthread_once_t Once = PTHREAD_ONCE_INIT; + pthread_once(&Once, +[] { Report("Event size too large.\n"); }); } int32_t ReducedEventSize = static_cast<int32_t>(EventSize); auto &TLD = getThreadLocalData(); @@ -264,8 +984,8 @@ void fdrLoggingHandleCustomEvent(void *Event, // Here we need to prepare the log to handle: // - The metadata record we're going to write. (16 bytes) - // - The additional data we're going to write. 
Currently, that's the size of - // the event we're going to dump into the log as free-form bytes. + // - The additional data we're going to write. Currently, that's the size + // of the event we're going to dump into the log as free-form bytes. if (!prepareBuffer(TSC, CPU, clock_gettime, MetadataRecSize + EventSize)) { TLD.BQ = nullptr; return; @@ -280,90 +1000,207 @@ void fdrLoggingHandleCustomEvent(void *Event, CustomEvent.RecordKind = uint8_t(MetadataRecord::RecordKinds::CustomEventMarker); constexpr auto TSCSize = sizeof(TC.TSC); - std::memcpy(&CustomEvent.Data, &ReducedEventSize, sizeof(int32_t)); - std::memcpy(&CustomEvent.Data[sizeof(int32_t)], &TSC, TSCSize); - std::memcpy(TLD.RecordPtr, &CustomEvent, sizeof(CustomEvent)); + internal_memcpy(&CustomEvent.Data, &ReducedEventSize, sizeof(int32_t)); + internal_memcpy(&CustomEvent.Data[sizeof(int32_t)], &TSC, TSCSize); + internal_memcpy(TLD.RecordPtr, &CustomEvent, sizeof(CustomEvent)); TLD.RecordPtr += sizeof(CustomEvent); - std::memcpy(TLD.RecordPtr, Event, ReducedEventSize); + internal_memcpy(TLD.RecordPtr, Event, ReducedEventSize); + incrementExtents(MetadataRecSize + EventSize); + endBufferIfFull(); +} + +void fdrLoggingHandleTypedEvent( + uint16_t EventType, const void *Event, + std::size_t EventSize) noexcept XRAY_NEVER_INSTRUMENT { + auto TC = getTimestamp(); + auto &TSC = TC.TSC; + auto &CPU = TC.CPU; + RecursionGuard Guard{Running}; + if (!Guard) + return; + if (EventSize > std::numeric_limits<int32_t>::max()) { + static pthread_once_t Once = PTHREAD_ONCE_INIT; + pthread_once(&Once, +[] { Report("Event size too large.\n"); }); + } + int32_t ReducedEventSize = static_cast<int32_t>(EventSize); + auto &TLD = getThreadLocalData(); + if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, clock_gettime)) + return; + + // Here we need to prepare the log to handle: + // - The metadata record we're going to write. (16 bytes) + // - The additional data we're going to write. 
Currently, that's the size + // of the event we're going to dump into the log as free-form bytes. + if (!prepareBuffer(TSC, CPU, clock_gettime, MetadataRecSize + EventSize)) { + TLD.BQ = nullptr; + return; + } + // Write the custom event metadata record, which consists of the following + // information: + // - 8 bytes (64-bits) for the full TSC when the event started. + // - 4 bytes (32-bits) for the length of the data. + // - 2 bytes (16-bits) for the event type. 3 bytes remain since one of the + // bytes has the record type (Metadata Record) and kind (TypedEvent). + // We'll log the error if the event type is greater than 2 bytes. + // Event types are generated sequentially, so 2^16 is enough. + MetadataRecord TypedEvent; + TypedEvent.Type = uint8_t(RecordType::Metadata); + TypedEvent.RecordKind = + uint8_t(MetadataRecord::RecordKinds::TypedEventMarker); + constexpr auto TSCSize = sizeof(TC.TSC); + internal_memcpy(&TypedEvent.Data, &ReducedEventSize, sizeof(int32_t)); + internal_memcpy(&TypedEvent.Data[sizeof(int32_t)], &TSC, TSCSize); + internal_memcpy(&TypedEvent.Data[sizeof(int32_t) + TSCSize], &EventType, + sizeof(EventType)); + internal_memcpy(TLD.RecordPtr, &TypedEvent, sizeof(TypedEvent)); + + TLD.RecordPtr += sizeof(TypedEvent); + internal_memcpy(TLD.RecordPtr, Event, ReducedEventSize); incrementExtents(MetadataRecSize + EventSize); endBufferIfFull(); } -XRayLogInitStatus fdrLoggingInit(std::size_t BufferSize, std::size_t BufferMax, +XRayLogInitStatus fdrLoggingInit(size_t BufferSize, size_t BufferMax, void *Options, size_t OptionsSize) XRAY_NEVER_INSTRUMENT { - if (OptionsSize != sizeof(FDRLoggingOptions)) { - if (__sanitizer::Verbosity()) - Report("Cannot initialize FDR logging; wrong size for options: %d\n", - OptionsSize); - return static_cast<XRayLogInitStatus>(__sanitizer::atomic_load( - &LoggingStatus, __sanitizer::memory_order_acquire)); - } + if (Options == nullptr) + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + s32 CurrentStatus = 
XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; - if (!__sanitizer::atomic_compare_exchange_strong( - &LoggingStatus, &CurrentStatus, - XRayLogInitStatus::XRAY_LOG_INITIALIZING, - __sanitizer::memory_order_release)) { - if (__sanitizer::Verbosity()) + if (!atomic_compare_exchange_strong(&LoggingStatus, &CurrentStatus, + XRayLogInitStatus::XRAY_LOG_INITIALIZING, + memory_order_release)) { + if (Verbosity()) Report("Cannot initialize already initialized implementation.\n"); return static_cast<XRayLogInitStatus>(CurrentStatus); } - { - __sanitizer::SpinMutexLock Guard(&FDROptionsMutex); - memcpy(&FDROptions, Options, OptionsSize); + // Because of __xray_log_init_mode(...) which guarantees that this will be + // called with BufferSize == 0 and BufferMax == 0 we parse the configuration + // provided in the Options pointer as a string instead. + if (BufferSize == 0 && BufferMax == 0) { + if (Verbosity()) + Report("Initializing FDR mode with options: %s\n", + static_cast<const char *>(Options)); + + // TODO: Factor out the flags specific to the FDR mode implementation. For + // now, use the global/single definition of the flags, since the FDR mode + // flags are already defined there. + FlagParser FDRParser; + FDRFlags FDRFlags; + registerXRayFDRFlags(&FDRParser, &FDRFlags); + FDRFlags.setDefaults(); + + // Override first from the general XRAY_DEFAULT_OPTIONS compiler-provided + // options until we migrate everyone to use the XRAY_FDR_OPTIONS + // compiler-provided options. + FDRParser.ParseString(useCompilerDefinedFlags()); + FDRParser.ParseString(useCompilerDefinedFDRFlags()); + auto *EnvOpts = GetEnv("XRAY_FDR_OPTIONS"); + if (EnvOpts == nullptr) + EnvOpts = ""; + FDRParser.ParseString(EnvOpts); + + // FIXME: Remove this when we fully remove the deprecated flags. 
+ if (internal_strlen(EnvOpts) == 0) { + FDRFlags.func_duration_threshold_us = + flags()->xray_fdr_log_func_duration_threshold_us; + FDRFlags.grace_period_ms = flags()->xray_fdr_log_grace_period_ms; + } + + // The provided options should always override the compiler-provided and + // environment-variable defined options. + FDRParser.ParseString(static_cast<const char *>(Options)); + *fdrFlags() = FDRFlags; + BufferSize = FDRFlags.buffer_size; + BufferMax = FDRFlags.buffer_max; + SpinMutexLock Guard(&FDROptionsMutex); + FDROptions.Fd = -1; + FDROptions.ReportErrors = true; + } else if (OptionsSize != sizeof(FDRLoggingOptions)) { + // FIXME: This is deprecated, and should really be removed. + // At this point we use the flag parser specific to the FDR mode + // implementation. + if (Verbosity()) + Report("Cannot initialize FDR logging; wrong size for options: %d\n", + OptionsSize); + return static_cast<XRayLogInitStatus>( + atomic_load(&LoggingStatus, memory_order_acquire)); + } else { + if (Verbosity()) + Report("XRay FDR: struct-based init is deprecated, please use " + "string-based configuration instead.\n"); + SpinMutexLock Guard(&FDROptionsMutex); + internal_memcpy(&FDROptions, Options, OptionsSize); } bool Success = false; if (BQ != nullptr) { - delete BQ; + BQ->~BufferQueue(); + InternalFree(BQ); BQ = nullptr; } - if (BQ == nullptr) - BQ = new BufferQueue(BufferSize, BufferMax, Success); + if (BQ == nullptr) { + BQ = reinterpret_cast<BufferQueue *>( + InternalAlloc(sizeof(BufferQueue), nullptr, 64)); + new (BQ) BufferQueue(BufferSize, BufferMax, Success); + } if (!Success) { Report("BufferQueue init failed.\n"); if (BQ != nullptr) { - delete BQ; + BQ->~BufferQueue(); + InternalFree(BQ); BQ = nullptr; } return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; } - static bool UNUSED Once = [] { - pthread_key_create(&__xray_fdr_internal::Key, +[](void *) { - auto &TLD = __xray_fdr_internal::getThreadLocalData(); + static pthread_once_t OnceInit = PTHREAD_ONCE_INIT; + 
pthread_once(&OnceInit, +[] { + atomic_store(&TicksPerSec, + probeRequiredCPUFeatures() ? getTSCFrequency() + : __xray::NanosecondsPerSecond, + memory_order_release); + pthread_key_create(&Key, +[](void *TLDPtr) { + if (TLDPtr == nullptr) + return; + auto &TLD = *reinterpret_cast<ThreadLocalData *>(TLDPtr); if (TLD.BQ == nullptr) return; auto EC = TLD.BQ->releaseBuffer(TLD.Buffer); if (EC != BufferQueue::ErrorCode::Ok) Report("At thread exit, failed to release buffer at %p; error=%s\n", - TLD.Buffer.Buffer, BufferQueue::getErrorString(EC)); + TLD.Buffer.Data, BufferQueue::getErrorString(EC)); }); - return false; - }(); + }); + atomic_store(&ThresholdTicks, + atomic_load_relaxed(&TicksPerSec) * + fdrFlags()->func_duration_threshold_us / 1000000, + memory_order_release); // Arg1 handler should go in first to avoid concurrent code accidentally // falling back to arg0 when it should have ran arg1. __xray_set_handler_arg1(fdrLoggingHandleArg1); // Install the actual handleArg0 handler after initialising the buffers. __xray_set_handler(fdrLoggingHandleArg0); __xray_set_customevent_handler(fdrLoggingHandleCustomEvent); + __xray_set_typedevent_handler(fdrLoggingHandleTypedEvent); + + // Install the buffer iterator implementation. 
+ __xray_log_set_buffer_iterator(fdrIterator); - __sanitizer::atomic_store(&LoggingStatus, - XRayLogInitStatus::XRAY_LOG_INITIALIZED, - __sanitizer::memory_order_release); + atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED, + memory_order_release); - if (__sanitizer::Verbosity()) + if (Verbosity()) Report("XRay FDR init successful.\n"); return XRayLogInitStatus::XRAY_LOG_INITIALIZED; } bool fdrLogDynamicInitializer() XRAY_NEVER_INSTRUMENT { - using namespace __xray; XRayLogImpl Impl{ fdrLoggingInit, fdrLoggingFinalize, @@ -372,11 +1209,10 @@ bool fdrLogDynamicInitializer() XRAY_NEVER_INSTRUMENT { }; auto RegistrationResult = __xray_log_register_mode("xray-fdr", Impl); if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK && - __sanitizer::Verbosity()) + Verbosity()) Report("Cannot register XRay FDR mode to 'xray-fdr'; error = %d\n", RegistrationResult); - if (flags()->xray_fdr_log || - !__sanitizer::internal_strcmp(flags()->xray_mode, "xray-fdr")) + if (flags()->xray_fdr_log || !internal_strcmp(flags()->xray_mode, "xray-fdr")) __xray_set_log_impl(Impl); return true; } diff --git a/lib/xray/xray_fdr_logging_impl.h b/lib/xray/xray_fdr_logging_impl.h deleted file mode 100644 index 59eab55b2573..000000000000 --- a/lib/xray/xray_fdr_logging_impl.h +++ /dev/null @@ -1,705 +0,0 @@ -//===-- xray_fdr_logging_impl.h ---------------------------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of XRay, a dynamic runtime instrumentation system. -// -// Here we implement the thread local state management and record i/o for Flight -// Data Recorder mode for XRay, where we use compact structures to store records -// in memory as well as when writing out the data to files. 
-// -//===----------------------------------------------------------------------===// -#ifndef XRAY_XRAY_FDR_LOGGING_IMPL_H -#define XRAY_XRAY_FDR_LOGGING_IMPL_H - -#include <cassert> -#include <cstddef> -#include <cstring> -#include <limits> -#include <pthread.h> -#include <sys/syscall.h> -#include <time.h> -#include <type_traits> -#include <unistd.h> - -#include "sanitizer_common/sanitizer_common.h" -#include "xray/xray_log_interface.h" -#include "xray_buffer_queue.h" -#include "xray_defs.h" -#include "xray_fdr_log_records.h" -#include "xray_flags.h" -#include "xray_tsc.h" - -namespace __xray { - -__sanitizer::atomic_sint32_t LoggingStatus = { - XRayLogInitStatus::XRAY_LOG_UNINITIALIZED}; - -/// We expose some of the state transitions when FDR logging mode is operating -/// such that we can simulate a series of log events that may occur without -/// and test with determinism without worrying about the real CPU time. -/// -/// Because the code uses thread_local allocation extensively as part of its -/// design, callers that wish to test events occuring on different threads -/// will actually have to run them on different threads. -/// -/// This also means that it is possible to break invariants maintained by -/// cooperation with xray_fdr_logging class, so be careful and think twice. -namespace __xray_fdr_internal { - -/// Writes the new buffer record and wallclock time that begin a buffer for the -/// current thread. -static void writeNewBufferPreamble(pid_t Tid, timespec TS); - -/// Writes a Function Record to the buffer associated with the current thread. -static void writeFunctionRecord(int FuncId, uint32_t TSCDelta, - XRayEntryType EntryType); - -/// Sets up a new buffer in thread_local storage and writes a preamble. The -/// wall_clock_reader function is used to populate the WallTimeRecord entry. 
-static void setupNewBuffer(int (*wall_clock_reader)(clockid_t, - struct timespec *)); - -/// TSC Wrap records are written when a TSC delta encoding scheme overflows. -static void writeTSCWrapMetadata(uint64_t TSC); - -// Group together thread-local-data in a struct, then hide it behind a function -// call so that it can be initialized on first use instead of as a global. We -// force the alignment to 64-bytes for x86 cache line alignment, as this -// structure is used in the hot path of implementation. -struct alignas(64) ThreadLocalData { - BufferQueue::Buffer Buffer; - char *RecordPtr = nullptr; - // The number of FunctionEntry records immediately preceding RecordPtr. - uint8_t NumConsecutiveFnEnters = 0; - - // The number of adjacent, consecutive pairs of FunctionEntry, Tail Exit - // records preceding RecordPtr. - uint8_t NumTailCalls = 0; - - // We use a thread_local variable to keep track of which CPUs we've already - // run, and the TSC times for these CPUs. This allows us to stop repeating the - // CPU field in the function records. - // - // We assume that we'll support only 65536 CPUs for x86_64. - uint16_t CurrentCPU = std::numeric_limits<uint16_t>::max(); - uint64_t LastTSC = 0; - uint64_t LastFunctionEntryTSC = 0; - - // Make sure a thread that's ever called handleArg0 has a thread-local - // live reference to the buffer queue for this particular instance of - // FDRLogging, and that we're going to clean it up when the thread exits. - BufferQueue *BQ = nullptr; -}; - -static_assert(std::is_trivially_destructible<ThreadLocalData>::value, - "ThreadLocalData must be trivially destructible"); - -static constexpr auto MetadataRecSize = sizeof(MetadataRecord); -static constexpr auto FunctionRecSize = sizeof(FunctionRecord); - -// Use a global pthread key to identify thread-local data for logging. 
-static pthread_key_t Key; - -// This function will initialize the thread-local data structure used by the FDR -// logging implementation and return a reference to it. The implementation -// details require a bit of care to maintain. -// -// First, some requirements on the implementation in general: -// -// - XRay handlers should not call any memory allocation routines that may -// delegate to an instrumented implementation. This means functions like -// malloc() and free() should not be called while instrumenting. -// -// - We would like to use some thread-local data initialized on first-use of -// the XRay instrumentation. These allow us to implement unsynchronized -// routines that access resources associated with the thread. -// -// The implementation here uses a few mechanisms that allow us to provide both -// the requirements listed above. We do this by: -// -// 1. Using a thread-local aligned storage buffer for representing the -// ThreadLocalData struct. This data will be uninitialized memory by -// design. -// -// 2. Not requiring a thread exit handler/implementation, keeping the -// thread-local as purely a collection of references/data that do not -// require cleanup. -// -// We're doing this to avoid using a `thread_local` object that has a -// non-trivial destructor, because the C++ runtime might call std::malloc(...) -// to register calls to destructors. Deadlocks may arise when, for example, an -// externally provided malloc implementation is XRay instrumented, and -// initializing the thread-locals involves calling into malloc. A malloc -// implementation that does global synchronization might be holding a lock for a -// critical section, calling a function that might be XRay instrumented (and -// thus in turn calling into malloc by virtue of registration of the -// thread_local's destructor). 
-static ThreadLocalData &getThreadLocalData() { - static_assert(alignof(ThreadLocalData) >= 64, - "ThreadLocalData must be cache line aligned."); - thread_local ThreadLocalData TLD; - thread_local bool UNUSED ThreadOnce = [] { - pthread_setspecific(Key, &TLD); - return false; - }(); - return TLD; -} - -//-----------------------------------------------------------------------------| -// The rest of the file is implementation. | -//-----------------------------------------------------------------------------| -// Functions are implemented in the header for inlining since we don't want | -// to grow the stack when we've hijacked the binary for logging. | -//-----------------------------------------------------------------------------| - -namespace { - -class RecursionGuard { - volatile bool &Running; - const bool Valid; - -public: - explicit RecursionGuard(volatile bool &R) : Running(R), Valid(!R) { - if (Valid) - Running = true; - } - - RecursionGuard(const RecursionGuard &) = delete; - RecursionGuard(RecursionGuard &&) = delete; - RecursionGuard &operator=(const RecursionGuard &) = delete; - RecursionGuard &operator=(RecursionGuard &&) = delete; - - explicit operator bool() const { return Valid; } - - ~RecursionGuard() noexcept { - if (Valid) - Running = false; - } -}; - -} // namespace - -static void writeNewBufferPreamble(pid_t Tid, - timespec TS) XRAY_NEVER_INSTRUMENT { - static constexpr int InitRecordsCount = 2; - auto &TLD = getThreadLocalData(); - MetadataRecord Metadata[InitRecordsCount]; - { - // Write out a MetadataRecord to signify that this is the start of a new - // buffer, associated with a particular thread, with a new CPU. For the - // data, we have 15 bytes to squeeze as much information as we can. 
At this - // point we only write down the following bytes: - // - Thread ID (pid_t, 4 bytes) - auto &NewBuffer = Metadata[0]; - NewBuffer.Type = uint8_t(RecordType::Metadata); - NewBuffer.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewBuffer); - std::memcpy(&NewBuffer.Data, &Tid, sizeof(pid_t)); - } - - // Also write the WalltimeMarker record. - { - static_assert(sizeof(time_t) <= 8, "time_t needs to be at most 8 bytes"); - auto &WalltimeMarker = Metadata[1]; - WalltimeMarker.Type = uint8_t(RecordType::Metadata); - WalltimeMarker.RecordKind = - uint8_t(MetadataRecord::RecordKinds::WalltimeMarker); - - // We only really need microsecond precision here, and enforce across - // platforms that we need 64-bit seconds and 32-bit microseconds encoded in - // the Metadata record. - int32_t Micros = TS.tv_nsec / 1000; - int64_t Seconds = TS.tv_sec; - std::memcpy(WalltimeMarker.Data, &Seconds, sizeof(Seconds)); - std::memcpy(WalltimeMarker.Data + sizeof(Seconds), &Micros, sizeof(Micros)); - } - - TLD.NumConsecutiveFnEnters = 0; - TLD.NumTailCalls = 0; - if (TLD.BQ == nullptr || TLD.BQ->finalizing()) - return; - std::memcpy(TLD.RecordPtr, Metadata, sizeof(Metadata)); - TLD.RecordPtr += sizeof(Metadata); - // Since we write out the extents as the first metadata record of the - // buffer, we need to write out the extents including the extents record. - __sanitizer::atomic_store(&TLD.Buffer.Extents->Size, sizeof(Metadata), - __sanitizer::memory_order_release); -} - -inline void setupNewBuffer(int (*wall_clock_reader)( - clockid_t, struct timespec *)) XRAY_NEVER_INSTRUMENT { - auto &TLD = getThreadLocalData(); - auto &B = TLD.Buffer; - TLD.RecordPtr = static_cast<char *>(B.Buffer); - pid_t Tid = syscall(SYS_gettid); - timespec TS{0, 0}; - // This is typically clock_gettime, but callers have injection ability. 
- wall_clock_reader(CLOCK_MONOTONIC, &TS); - writeNewBufferPreamble(Tid, TS); - TLD.NumConsecutiveFnEnters = 0; - TLD.NumTailCalls = 0; -} - -static void incrementExtents(size_t Add) { - auto &TLD = getThreadLocalData(); - __sanitizer::atomic_fetch_add(&TLD.Buffer.Extents->Size, Add, - __sanitizer::memory_order_acq_rel); -} - -static void decrementExtents(size_t Subtract) { - auto &TLD = getThreadLocalData(); - __sanitizer::atomic_fetch_sub(&TLD.Buffer.Extents->Size, Subtract, - __sanitizer::memory_order_acq_rel); -} - -inline void writeNewCPUIdMetadata(uint16_t CPU, - uint64_t TSC) XRAY_NEVER_INSTRUMENT { - auto &TLD = getThreadLocalData(); - MetadataRecord NewCPUId; - NewCPUId.Type = uint8_t(RecordType::Metadata); - NewCPUId.RecordKind = uint8_t(MetadataRecord::RecordKinds::NewCPUId); - - // The data for the New CPU will contain the following bytes: - // - CPU ID (uint16_t, 2 bytes) - // - Full TSC (uint64_t, 8 bytes) - // Total = 10 bytes. - std::memcpy(&NewCPUId.Data, &CPU, sizeof(CPU)); - std::memcpy(&NewCPUId.Data[sizeof(CPU)], &TSC, sizeof(TSC)); - std::memcpy(TLD.RecordPtr, &NewCPUId, sizeof(MetadataRecord)); - TLD.RecordPtr += sizeof(MetadataRecord); - TLD.NumConsecutiveFnEnters = 0; - TLD.NumTailCalls = 0; - incrementExtents(sizeof(MetadataRecord)); -} - -inline void writeTSCWrapMetadata(uint64_t TSC) XRAY_NEVER_INSTRUMENT { - auto &TLD = getThreadLocalData(); - MetadataRecord TSCWrap; - TSCWrap.Type = uint8_t(RecordType::Metadata); - TSCWrap.RecordKind = uint8_t(MetadataRecord::RecordKinds::TSCWrap); - - // The data for the TSCWrap record contains the following bytes: - // - Full TSC (uint64_t, 8 bytes) - // Total = 8 bytes. 
- std::memcpy(&TSCWrap.Data, &TSC, sizeof(TSC)); - std::memcpy(TLD.RecordPtr, &TSCWrap, sizeof(MetadataRecord)); - TLD.RecordPtr += sizeof(MetadataRecord); - TLD.NumConsecutiveFnEnters = 0; - TLD.NumTailCalls = 0; - incrementExtents(sizeof(MetadataRecord)); -} - -// Call Argument metadata records store the arguments to a function in the -// order of their appearance; holes are not supported by the buffer format. -static inline void writeCallArgumentMetadata(uint64_t A) XRAY_NEVER_INSTRUMENT { - auto &TLD = getThreadLocalData(); - MetadataRecord CallArg; - CallArg.Type = uint8_t(RecordType::Metadata); - CallArg.RecordKind = uint8_t(MetadataRecord::RecordKinds::CallArgument); - - std::memcpy(CallArg.Data, &A, sizeof(A)); - std::memcpy(TLD.RecordPtr, &CallArg, sizeof(MetadataRecord)); - TLD.RecordPtr += sizeof(MetadataRecord); - incrementExtents(sizeof(MetadataRecord)); -} - -static inline void -writeFunctionRecord(int FuncId, uint32_t TSCDelta, - XRayEntryType EntryType) XRAY_NEVER_INSTRUMENT { - FunctionRecord FuncRecord; - FuncRecord.Type = uint8_t(RecordType::Function); - // Only take 28 bits of the function id. - FuncRecord.FuncId = FuncId & ~(0x0F << 28); - FuncRecord.TSCDelta = TSCDelta; - - auto &TLD = getThreadLocalData(); - switch (EntryType) { - case XRayEntryType::ENTRY: - ++TLD.NumConsecutiveFnEnters; - FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter); - break; - case XRayEntryType::LOG_ARGS_ENTRY: - // We should not rewind functions with logged args. - TLD.NumConsecutiveFnEnters = 0; - TLD.NumTailCalls = 0; - FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionEnter); - break; - case XRayEntryType::EXIT: - // If we've decided to log the function exit, we will never erase the log - // before it. 
- TLD.NumConsecutiveFnEnters = 0; - TLD.NumTailCalls = 0; - FuncRecord.RecordKind = uint8_t(FunctionRecord::RecordKinds::FunctionExit); - break; - case XRayEntryType::TAIL: - // If we just entered the function we're tail exiting from or erased every - // invocation since then, this function entry tail pair is a candidate to - // be erased when the child function exits. - if (TLD.NumConsecutiveFnEnters > 0) { - ++TLD.NumTailCalls; - TLD.NumConsecutiveFnEnters = 0; - } else { - // We will never be able to erase this tail call since we have logged - // something in between the function entry and tail exit. - TLD.NumTailCalls = 0; - TLD.NumConsecutiveFnEnters = 0; - } - FuncRecord.RecordKind = - uint8_t(FunctionRecord::RecordKinds::FunctionTailExit); - break; - case XRayEntryType::CUSTOM_EVENT: { - // This is a bug in patching, so we'll report it once and move on. - static bool Once = [&] { - Report("Internal error: patched an XRay custom event call as a function; " - "func id = %d\n", - FuncId); - return true; - }(); - (void)Once; - return; - } - } - - std::memcpy(TLD.RecordPtr, &FuncRecord, sizeof(FunctionRecord)); - TLD.RecordPtr += sizeof(FunctionRecord); - incrementExtents(sizeof(FunctionRecord)); -} - -static uint64_t thresholdTicks() { - static uint64_t TicksPerSec = probeRequiredCPUFeatures() - ? getTSCFrequency() - : __xray::NanosecondsPerSecond; - static const uint64_t ThresholdTicks = - TicksPerSec * flags()->xray_fdr_log_func_duration_threshold_us / 1000000; - return ThresholdTicks; -} - -// Re-point the thread local pointer into this thread's Buffer before the recent -// "Function Entry" record and any "Tail Call Exit" records after that. 
-static void rewindRecentCall(uint64_t TSC, uint64_t &LastTSC, - uint64_t &LastFunctionEntryTSC, int32_t FuncId) { - auto &TLD = getThreadLocalData(); - TLD.RecordPtr -= FunctionRecSize; - decrementExtents(FunctionRecSize); - FunctionRecord FuncRecord; - std::memcpy(&FuncRecord, TLD.RecordPtr, FunctionRecSize); - assert(FuncRecord.RecordKind == - uint8_t(FunctionRecord::RecordKinds::FunctionEnter) && - "Expected to find function entry recording when rewinding."); - assert(FuncRecord.FuncId == (FuncId & ~(0x0F << 28)) && - "Expected matching function id when rewinding Exit"); - --TLD.NumConsecutiveFnEnters; - LastTSC -= FuncRecord.TSCDelta; - - // We unwound one call. Update the state and return without writing a log. - if (TLD.NumConsecutiveFnEnters != 0) { - LastFunctionEntryTSC -= FuncRecord.TSCDelta; - return; - } - - // Otherwise we've rewound the stack of all function entries, we might be - // able to rewind further by erasing tail call functions that are being - // exited from via this exit. - LastFunctionEntryTSC = 0; - auto RewindingTSC = LastTSC; - auto RewindingRecordPtr = TLD.RecordPtr - FunctionRecSize; - while (TLD.NumTailCalls > 0) { - // Rewind the TSC back over the TAIL EXIT record. 
- FunctionRecord ExpectedTailExit; - std::memcpy(&ExpectedTailExit, RewindingRecordPtr, FunctionRecSize); - - assert(ExpectedTailExit.RecordKind == - uint8_t(FunctionRecord::RecordKinds::FunctionTailExit) && - "Expected to find tail exit when rewinding."); - RewindingRecordPtr -= FunctionRecSize; - RewindingTSC -= ExpectedTailExit.TSCDelta; - FunctionRecord ExpectedFunctionEntry; - std::memcpy(&ExpectedFunctionEntry, RewindingRecordPtr, FunctionRecSize); - assert(ExpectedFunctionEntry.RecordKind == - uint8_t(FunctionRecord::RecordKinds::FunctionEnter) && - "Expected to find function entry when rewinding tail call."); - assert(ExpectedFunctionEntry.FuncId == ExpectedTailExit.FuncId && - "Expected funcids to match when rewinding tail call."); - - // This tail call exceeded the threshold duration. It will not be erased. - if ((TSC - RewindingTSC) >= thresholdTicks()) { - TLD.NumTailCalls = 0; - return; - } - - // We can erase a tail exit pair that we're exiting through since - // its duration is under threshold. 
- --TLD.NumTailCalls; - RewindingRecordPtr -= FunctionRecSize; - RewindingTSC -= ExpectedFunctionEntry.TSCDelta; - TLD.RecordPtr -= 2 * FunctionRecSize; - LastTSC = RewindingTSC; - decrementExtents(2 * FunctionRecSize); - } -} - -inline bool releaseThreadLocalBuffer(BufferQueue &BQArg) { - auto &TLD = getThreadLocalData(); - auto EC = BQArg.releaseBuffer(TLD.Buffer); - if (EC != BufferQueue::ErrorCode::Ok) { - Report("Failed to release buffer at %p; error=%s\n", TLD.Buffer.Buffer, - BufferQueue::getErrorString(EC)); - return false; - } - return true; -} - -inline bool prepareBuffer(uint64_t TSC, unsigned char CPU, - int (*wall_clock_reader)(clockid_t, - struct timespec *), - size_t MaxSize) XRAY_NEVER_INSTRUMENT { - auto &TLD = getThreadLocalData(); - char *BufferStart = static_cast<char *>(TLD.Buffer.Buffer); - if ((TLD.RecordPtr + MaxSize) > (BufferStart + TLD.Buffer.Size)) { - if (!releaseThreadLocalBuffer(*TLD.BQ)) - return false; - auto EC = TLD.BQ->getBuffer(TLD.Buffer); - if (EC != BufferQueue::ErrorCode::Ok) { - Report("Failed to acquire a buffer; error=%s\n", - BufferQueue::getErrorString(EC)); - return false; - } - setupNewBuffer(wall_clock_reader); - - // Always write the CPU metadata as the first record in the buffer. - writeNewCPUIdMetadata(CPU, TSC); - } - return true; -} - -inline bool -isLogInitializedAndReady(BufferQueue *LBQ, uint64_t TSC, unsigned char CPU, - int (*wall_clock_reader)(clockid_t, struct timespec *)) - XRAY_NEVER_INSTRUMENT { - // Bail out right away if logging is not initialized yet. - // We should take the opportunity to release the buffer though. 
- auto Status = __sanitizer::atomic_load(&LoggingStatus, - __sanitizer::memory_order_acquire); - auto &TLD = getThreadLocalData(); - if (Status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) { - if (TLD.RecordPtr != nullptr && - (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING || - Status == XRayLogInitStatus::XRAY_LOG_FINALIZED)) { - if (!releaseThreadLocalBuffer(*LBQ)) - return false; - TLD.RecordPtr = nullptr; - return false; - } - return false; - } - - if (__sanitizer::atomic_load(&LoggingStatus, - __sanitizer::memory_order_acquire) != - XRayLogInitStatus::XRAY_LOG_INITIALIZED || - LBQ->finalizing()) { - if (!releaseThreadLocalBuffer(*LBQ)) - return false; - TLD.RecordPtr = nullptr; - } - - if (TLD.Buffer.Buffer == nullptr) { - auto EC = LBQ->getBuffer(TLD.Buffer); - if (EC != BufferQueue::ErrorCode::Ok) { - auto LS = __sanitizer::atomic_load(&LoggingStatus, - __sanitizer::memory_order_acquire); - if (LS != XRayLogInitStatus::XRAY_LOG_FINALIZING && - LS != XRayLogInitStatus::XRAY_LOG_FINALIZED) - Report("Failed to acquire a buffer; error=%s\n", - BufferQueue::getErrorString(EC)); - return false; - } - - setupNewBuffer(wall_clock_reader); - - // Always write the CPU metadata as the first record in the buffer. - writeNewCPUIdMetadata(CPU, TSC); - } - - if (TLD.CurrentCPU == std::numeric_limits<uint16_t>::max()) { - // This means this is the first CPU this thread has ever run on. We set - // the current CPU and record this as the first TSC we've seen. - TLD.CurrentCPU = CPU; - writeNewCPUIdMetadata(CPU, TSC); - } - - return true; -} // namespace __xray_fdr_internal - -// Compute the TSC difference between the time of measurement and the previous -// event. There are a few interesting situations we need to account for: -// -// - The thread has migrated to a different CPU. If this is the case, then -// we write down the following records: -// -// 1. A 'NewCPUId' Metadata record. -// 2. A FunctionRecord with a 0 for the TSCDelta field. 
-// -// - The TSC delta is greater than the 32 bits we can store in a -// FunctionRecord. In this case we write down the following records: -// -// 1. A 'TSCWrap' Metadata record. -// 2. A FunctionRecord with a 0 for the TSCDelta field. -// -// - The TSC delta is representable within the 32 bits we can store in a -// FunctionRecord. In this case we write down just a FunctionRecord with -// the correct TSC delta. -inline uint32_t writeCurrentCPUTSC(ThreadLocalData &TLD, uint64_t TSC, - uint8_t CPU) { - if (CPU != TLD.CurrentCPU) { - // We've moved to a new CPU. - writeNewCPUIdMetadata(CPU, TSC); - return 0; - } - // If the delta is greater than the range for a uint32_t, then we write out - // the TSC wrap metadata entry with the full TSC, and the TSC for the - // function record be 0. - uint64_t Delta = TSC - TLD.LastTSC; - if (Delta <= std::numeric_limits<uint32_t>::max()) - return Delta; - - writeTSCWrapMetadata(TSC); - return 0; -} - -inline void endBufferIfFull() XRAY_NEVER_INSTRUMENT { - auto &TLD = getThreadLocalData(); - auto BufferStart = static_cast<char *>(TLD.Buffer.Buffer); - if ((TLD.RecordPtr + MetadataRecSize) - BufferStart <= - ptrdiff_t{MetadataRecSize}) { - if (!releaseThreadLocalBuffer(*TLD.BQ)) - return; - TLD.RecordPtr = nullptr; - } -} - -thread_local volatile bool Running = false; - -/// Here's where the meat of the processing happens. The writer captures -/// function entry, exit and tail exit points with a time and will create -/// TSCWrap, NewCPUId and Function records as necessary. The writer might -/// walk backward through its buffer and erase trivial functions to avoid -/// polluting the log and may use the buffer queue to obtain or release a -/// buffer. 
-inline void processFunctionHook(int32_t FuncId, XRayEntryType Entry, - uint64_t TSC, unsigned char CPU, uint64_t Arg1, - int (*wall_clock_reader)(clockid_t, - struct timespec *), - BufferQueue *BQ) XRAY_NEVER_INSTRUMENT { - // Prevent signal handler recursion, so in case we're already in a log writing - // mode and the signal handler comes in (and is also instrumented) then we - // don't want to be clobbering potentially partial writes already happening in - // the thread. We use a simple thread_local latch to only allow one on-going - // handleArg0 to happen at any given time. - RecursionGuard Guard{Running}; - if (!Guard) { - assert(Running == true && "RecursionGuard is buggy!"); - return; - } - - auto &TLD = getThreadLocalData(); - - // In case the reference has been cleaned up before, we make sure we - // initialize it to the provided BufferQueue. - if (TLD.BQ == nullptr) - TLD.BQ = BQ; - - if (!isLogInitializedAndReady(TLD.BQ, TSC, CPU, wall_clock_reader)) - return; - - // Before we go setting up writing new function entries, we need to be really - // careful about the pointer math we're doing. This means we need to ensure - // that the record we are about to write is going to fit into the buffer, - // without overflowing the buffer. - // - // To do this properly, we use the following assumptions: - // - // - The least number of bytes we will ever write is 8 - // (sizeof(FunctionRecord)) only if the delta between the previous entry - // and this entry is within 32 bits. - // - The most number of bytes we will ever write is 8 + 16 + 16 = 40. - // This is computed by: - // - // MaxSize = sizeof(FunctionRecord) + 2 * sizeof(MetadataRecord) - // - // These arise in the following cases: - // - // 1. When the delta between the TSC we get and the previous TSC for the - // same CPU is outside of the uint32_t range, we end up having to - // write a MetadataRecord to indicate a "tsc wrap" before the actual - // FunctionRecord. - // 2. 
When we learn that we've moved CPUs, we need to write a - // MetadataRecord to indicate a "cpu change", and thus write out the - // current TSC for that CPU before writing out the actual - // FunctionRecord. - // 3. When we learn about a new CPU ID, we need to write down a "new cpu - // id" MetadataRecord before writing out the actual FunctionRecord. - // 4. The second MetadataRecord is the optional function call argument. - // - // So the math we need to do is to determine whether writing 40 bytes past the - // current pointer exceeds the buffer's maximum size. If we don't have enough - // space to write 40 bytes in the buffer, we need get a new Buffer, set it up - // properly before doing any further writing. - size_t MaxSize = FunctionRecSize + 2 * MetadataRecSize; - if (!prepareBuffer(TSC, CPU, wall_clock_reader, MaxSize)) { - TLD.BQ = nullptr; - return; - } - - // By this point, we are now ready to write up to 40 bytes (explained above). - assert((TLD.RecordPtr + MaxSize) - static_cast<char *>(TLD.Buffer.Buffer) >= - static_cast<ptrdiff_t>(MetadataRecSize) && - "Misconfigured BufferQueue provided; Buffer size not large enough."); - - auto RecordTSCDelta = writeCurrentCPUTSC(TLD, TSC, CPU); - TLD.LastTSC = TSC; - TLD.CurrentCPU = CPU; - switch (Entry) { - case XRayEntryType::ENTRY: - case XRayEntryType::LOG_ARGS_ENTRY: - // Update the thread local state for the next invocation. - TLD.LastFunctionEntryTSC = TSC; - break; - case XRayEntryType::TAIL: - case XRayEntryType::EXIT: - // Break out and write the exit record if we can't erase any functions. - if (TLD.NumConsecutiveFnEnters == 0 || - (TSC - TLD.LastFunctionEntryTSC) >= thresholdTicks()) - break; - rewindRecentCall(TSC, TLD.LastTSC, TLD.LastFunctionEntryTSC, FuncId); - return; // without writing log. - case XRayEntryType::CUSTOM_EVENT: { - // This is a bug in patching, so we'll report it once and move on. 
- static bool Once = [&] { - Report("Internal error: patched an XRay custom event call as a function; " - "func id = %d", - FuncId); - return true; - }(); - (void)Once; - return; - } - } - - writeFunctionRecord(FuncId, RecordTSCDelta, Entry); - if (Entry == XRayEntryType::LOG_ARGS_ENTRY) - writeCallArgumentMetadata(Arg1); - - // If we've exhausted the buffer by this time, we then release the buffer to - // make sure that other threads may start using this buffer. - endBufferIfFull(); -} - -} // namespace __xray_fdr_internal -} // namespace __xray - -#endif // XRAY_XRAY_FDR_LOGGING_IMPL_H diff --git a/lib/xray/xray_flags.cc b/lib/xray/xray_flags.cc index 1ee4d10d753c..b50b68666d80 100644 --- a/lib/xray/xray_flags.cc +++ b/lib/xray/xray_flags.cc @@ -30,7 +30,7 @@ void Flags::setDefaults() XRAY_NEVER_INSTRUMENT { #undef XRAY_FLAG } -static void registerXRayFlags(FlagParser *P, Flags *F) XRAY_NEVER_INSTRUMENT { +void registerXRayFlags(FlagParser *P, Flags *F) XRAY_NEVER_INSTRUMENT { #define XRAY_FLAG(Type, Name, DefaultValue, Description) \ RegisterFlag(P, #Name, Description, &F->Name); #include "xray_flags.inc" @@ -42,15 +42,14 @@ static void registerXRayFlags(FlagParser *P, Flags *F) XRAY_NEVER_INSTRUMENT { // options that control XRay. This means users/deployments can tweak the // defaults that override the hard-coded defaults in the xray_flags.inc at // compile-time using the XRAY_DEFAULT_OPTIONS macro. -static const char *useCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT { +const char *useCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT { #ifdef XRAY_DEFAULT_OPTIONS -// Do the double-layered string conversion to prevent badly crafted strings -// provided through the XRAY_DEFAULT_OPTIONS from causing compilation issues (or -// changing the semantics of the implementation through the macro). This ensures -// that we convert whatever XRAY_DEFAULT_OPTIONS is defined as a string literal. 
-#define XRAY_STRINGIZE(x) #x -#define XRAY_STRINGIZE_OPTIONS(options) XRAY_STRINGIZE(options) - return XRAY_STRINGIZE_OPTIONS(XRAY_DEFAULT_OPTIONS); + // Do the double-layered string conversion to prevent badly crafted strings + // provided through the XRAY_DEFAULT_OPTIONS from causing compilation issues + // (or changing the semantics of the implementation through the macro). This + // ensures that we convert whatever XRAY_DEFAULT_OPTIONS is defined as a + // string literal. + return SANITIZER_STRINGIFY(XRAY_DEFAULT_OPTIONS); #else return ""; #endif diff --git a/lib/xray/xray_flags.h b/lib/xray/xray_flags.h index 3ed5b8844cb4..7c1ba9458856 100644 --- a/lib/xray/xray_flags.h +++ b/lib/xray/xray_flags.h @@ -29,6 +29,8 @@ struct Flags { }; extern Flags xray_flags_dont_use_directly; +extern void registerXRayFlags(FlagParser *P, Flags *F); +const char *useCompilerDefinedFlags(); inline Flags *flags() { return &xray_flags_dont_use_directly; } void initializeFlags(); diff --git a/lib/xray/xray_flags.inc b/lib/xray/xray_flags.inc index 29f1fce7d7f4..c87903963a36 100644 --- a/lib/xray/xray_flags.inc +++ b/lib/xray/xray_flags.inc @@ -27,23 +27,24 @@ XRAY_FLAG(uptr, xray_page_size_override, 0, XRAY_FLAG(bool, xray_naive_log, false, "DEPRECATED: Use xray_mode=xray-basic instead.") XRAY_FLAG(int, xray_naive_log_func_duration_threshold_us, 5, - "Naive logging will try to skip functions that execute for fewer " - "microseconds than this threshold.") + "DEPRECATED: use the environment variable XRAY_BASIC_OPTIONS and set " + "func_duration_threshold_us instead.") XRAY_FLAG(int, xray_naive_log_max_stack_depth, 64, - "Naive logging will keep track of at most this deep a call stack, " - "any more and the recordings will be droppped.") + "DEPRECATED: use the environment variable XRAY_BASIC_OPTIONS and set " + "max_stack_depth instead.") XRAY_FLAG(int, xray_naive_log_thread_buffer_size, 1024, - "The number of entries to keep on a per-thread buffer.") + "DEPRECATED: use the environment 
variable XRAY_BASIC_OPTIONS and set " + "thread_buffer_size instead.") // FDR (Flight Data Recorder) Mode logging options. XRAY_FLAG(bool, xray_fdr_log, false, "DEPRECATED: Use xray_mode=xray-fdr instead.") XRAY_FLAG(int, xray_fdr_log_func_duration_threshold_us, 5, - "FDR logging will try to skip functions that execute for fewer " - "microseconds than this threshold.") + "DEPRECATED: use the environment variable XRAY_FDR_OPTIONS and set " + "func_duration_threshold_us instead.") XRAY_FLAG(int, xray_fdr_log_grace_period_us, 0, - "DEPRECATED: use xray_fdr_log_grace_period_ms instead.") + "DEPRECATED: use the environment variable XRAY_FDR_OPTIONS and set " + "grace_period_ms instead.") XRAY_FLAG(int, xray_fdr_log_grace_period_ms, 100, - "FDR logging will wait this much time in microseconds before " - "actually flushing the log; this gives a chance for threads to " - "notice that the log has been finalized and clean up.") + "DEPRECATED: use the environment variable XRAY_FDR_OPTIONS and set " + "grace_period_ms instead.") diff --git a/lib/xray/xray_function_call_trie.h b/lib/xray/xray_function_call_trie.h new file mode 100644 index 000000000000..2acf14aa5625 --- /dev/null +++ b/lib/xray/xray_function_call_trie.h @@ -0,0 +1,455 @@ +//===-- xray_function_call_trie.h ------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// This file defines the interface for a function call trie. 
+// +//===----------------------------------------------------------------------===// +#ifndef XRAY_FUNCTION_CALL_TRIE_H +#define XRAY_FUNCTION_CALL_TRIE_H + +#include "sanitizer_common/sanitizer_allocator_internal.h" +#include "xray_profiling_flags.h" +#include "xray_segmented_array.h" +#include <memory> // For placement new. +#include <utility> + +namespace __xray { + +/// A FunctionCallTrie represents the stack traces of XRay instrumented +/// functions that we've encountered, where a node corresponds to a function and +/// the path from the root to the node its stack trace. Each node in the trie +/// will contain some useful values, including: +/// +/// * The cumulative amount of time spent in this particular node/stack. +/// * The number of times this stack has appeared. +/// * A histogram of latencies for that particular node. +/// +/// Each node in the trie will also contain a list of callees, represented using +/// a Array<NodeIdPair> -- each NodeIdPair instance will contain the function +/// ID of the callee, and a pointer to the node. +/// +/// If we visualise this data structure, we'll find the following potential +/// representation: +/// +/// [function id node] -> [callees] [cumulative time] +/// [call counter] [latency histogram] +/// +/// As an example, when we have a function in this pseudocode: +/// +/// func f(N) { +/// g() +/// h() +/// for i := 1..N { j() } +/// } +/// +/// We may end up with a trie of the following form: +/// +/// f -> [ g, h, j ] [...] [1] [...] +/// g -> [ ... ] [...] [1] [...] +/// h -> [ ... ] [...] [1] [...] +/// j -> [ ... ] [...] [N] [...] +/// +/// If for instance the function g() called j() like so: +/// +/// func g() { +/// for i := 1..10 { j() } +/// } +/// +/// We'll find the following updated trie: +/// +/// f -> [ g, h, j ] [...] [1] [...] +/// g -> [ j' ] [...] [1] [...] +/// h -> [ ... ] [...] [1] [...] +/// j -> [ ... ] [...] [N] [...] +/// j' -> [ ... ] [...] [10] [...] 
+/// +/// Note that we'll have a new node representing the path `f -> g -> j'` with +/// isolated data. This isolation gives us a means of representing the stack +/// traces as a path, as opposed to a key in a table. The alternative +/// implementation here would be to use a separate table for the path, and use +/// hashes of the path as an identifier to accumulate the information. We've +/// moved away from this approach as it takes a lot of time to compute the hash +/// every time we need to update a function's call information as we're handling +/// the entry and exit events. +/// +/// This approach allows us to maintain a shadow stack, which represents the +/// currently executing path, and on function exits quickly compute the amount +/// of time elapsed from the entry, then update the counters for the node +/// already represented in the trie. This necessitates an efficient +/// representation of the various data structures (the list of callees must be +/// cache-aware and efficient to look up, and the histogram must be compact and +/// quick to update) to enable us to keep the overheads of this implementation +/// to the minimum. +class FunctionCallTrie { +public: + struct Node; + + // We use a NodeIdPair type instead of a std::pair<...> to not rely on the + // standard library types in this header. + struct NodeIdPair { + Node *NodePtr; + int32_t FId; + + // Constructor for inplace-construction. + NodeIdPair(Node *N, int32_t F) : NodePtr(N), FId(F) {} + }; + + using NodeIdPairArray = Array<NodeIdPair>; + using NodeIdPairAllocatorType = NodeIdPairArray::AllocatorType; + + // A Node in the FunctionCallTrie gives us a list of callees, the cumulative + // number of times this node actually appeared, the cumulative amount of time + // for this particular node including its children call times, and just the + // local time spent on this node. Each Node will have the ID of the XRay + // instrumented function that it is associated to. 
+ struct Node { + Node *Parent; + NodeIdPairArray Callees; + int64_t CallCount; + int64_t CumulativeLocalTime; // Typically in TSC deltas, not wall-time. + int32_t FId; + + // We add a constructor here to allow us to inplace-construct through + // Array<...>'s AppendEmplace. + Node(Node *P, NodeIdPairAllocatorType &A, int64_t CC, int64_t CLT, + int32_t F) + : Parent(P), Callees(A), CallCount(CC), CumulativeLocalTime(CLT), + FId(F) {} + + // TODO: Include the compact histogram. + }; + +private: + struct ShadowStackEntry { + uint64_t EntryTSC; + Node *NodePtr; + + // We add a constructor here to allow us to inplace-construct through + // Array<...>'s AppendEmplace. + ShadowStackEntry(uint64_t T, Node *N) : EntryTSC{T}, NodePtr{N} {} + }; + + using NodeArray = Array<Node>; + using RootArray = Array<Node *>; + using ShadowStackArray = Array<ShadowStackEntry>; + +public: + // We collate the allocators we need into a single struct, as a convenience to + // allow us to initialize these as a group. 
+ struct Allocators { + using NodeAllocatorType = NodeArray::AllocatorType; + using RootAllocatorType = RootArray::AllocatorType; + using ShadowStackAllocatorType = ShadowStackArray::AllocatorType; + + NodeAllocatorType *NodeAllocator = nullptr; + RootAllocatorType *RootAllocator = nullptr; + ShadowStackAllocatorType *ShadowStackAllocator = nullptr; + NodeIdPairAllocatorType *NodeIdPairAllocator = nullptr; + + Allocators() {} + Allocators(const Allocators &) = delete; + Allocators &operator=(const Allocators &) = delete; + + Allocators(Allocators &&O) + : NodeAllocator(O.NodeAllocator), RootAllocator(O.RootAllocator), + ShadowStackAllocator(O.ShadowStackAllocator), + NodeIdPairAllocator(O.NodeIdPairAllocator) { + O.NodeAllocator = nullptr; + O.RootAllocator = nullptr; + O.ShadowStackAllocator = nullptr; + O.NodeIdPairAllocator = nullptr; + } + + Allocators &operator=(Allocators &&O) { + { + auto Tmp = O.NodeAllocator; + O.NodeAllocator = this->NodeAllocator; + this->NodeAllocator = Tmp; + } + { + auto Tmp = O.RootAllocator; + O.RootAllocator = this->RootAllocator; + this->RootAllocator = Tmp; + } + { + auto Tmp = O.ShadowStackAllocator; + O.ShadowStackAllocator = this->ShadowStackAllocator; + this->ShadowStackAllocator = Tmp; + } + { + auto Tmp = O.NodeIdPairAllocator; + O.NodeIdPairAllocator = this->NodeIdPairAllocator; + this->NodeIdPairAllocator = Tmp; + } + return *this; + } + + ~Allocators() { + // Note that we cannot use delete on these pointers, as they need to be + // returned to the sanitizer_common library's internal memory tracking + // system. 
+ if (NodeAllocator != nullptr) { + NodeAllocator->~NodeAllocatorType(); + InternalFree(NodeAllocator); + NodeAllocator = nullptr; + } + if (RootAllocator != nullptr) { + RootAllocator->~RootAllocatorType(); + InternalFree(RootAllocator); + RootAllocator = nullptr; + } + if (ShadowStackAllocator != nullptr) { + ShadowStackAllocator->~ShadowStackAllocatorType(); + InternalFree(ShadowStackAllocator); + ShadowStackAllocator = nullptr; + } + if (NodeIdPairAllocator != nullptr) { + NodeIdPairAllocator->~NodeIdPairAllocatorType(); + InternalFree(NodeIdPairAllocator); + NodeIdPairAllocator = nullptr; + } + } + }; + + // TODO: Support configuration of options through the arguments. + static Allocators InitAllocators() { + return InitAllocatorsCustom(profilingFlags()->per_thread_allocator_max); + } + + static Allocators InitAllocatorsCustom(uptr Max) { + Allocators A; + auto NodeAllocator = reinterpret_cast<Allocators::NodeAllocatorType *>( + InternalAlloc(sizeof(Allocators::NodeAllocatorType))); + new (NodeAllocator) Allocators::NodeAllocatorType(Max); + A.NodeAllocator = NodeAllocator; + + auto RootAllocator = reinterpret_cast<Allocators::RootAllocatorType *>( + InternalAlloc(sizeof(Allocators::RootAllocatorType))); + new (RootAllocator) Allocators::RootAllocatorType(Max); + A.RootAllocator = RootAllocator; + + auto ShadowStackAllocator = + reinterpret_cast<Allocators::ShadowStackAllocatorType *>( + InternalAlloc(sizeof(Allocators::ShadowStackAllocatorType))); + new (ShadowStackAllocator) Allocators::ShadowStackAllocatorType(Max); + A.ShadowStackAllocator = ShadowStackAllocator; + + auto NodeIdPairAllocator = reinterpret_cast<NodeIdPairAllocatorType *>( + InternalAlloc(sizeof(NodeIdPairAllocatorType))); + new (NodeIdPairAllocator) NodeIdPairAllocatorType(Max); + A.NodeIdPairAllocator = NodeIdPairAllocator; + return A; + } + +private: + NodeArray Nodes; + RootArray Roots; + ShadowStackArray ShadowStack; + NodeIdPairAllocatorType *NodeIdPairAllocator = nullptr; + +public: + 
explicit FunctionCallTrie(const Allocators &A) + : Nodes(*A.NodeAllocator), Roots(*A.RootAllocator), + ShadowStack(*A.ShadowStackAllocator), + NodeIdPairAllocator(A.NodeIdPairAllocator) {} + + void enterFunction(const int32_t FId, uint64_t TSC) { + DCHECK_NE(FId, 0); + // This function primarily deals with ensuring that the ShadowStack is + // consistent and ready for when an exit event is encountered. + if (UNLIKELY(ShadowStack.empty())) { + auto NewRoot = + Nodes.AppendEmplace(nullptr, *NodeIdPairAllocator, 0, 0, FId); + if (UNLIKELY(NewRoot == nullptr)) + return; + Roots.Append(NewRoot); + ShadowStack.AppendEmplace(TSC, NewRoot); + return; + } + + auto &Top = ShadowStack.back(); + auto TopNode = Top.NodePtr; + DCHECK_NE(TopNode, nullptr); + + // If we've seen this callee before, then we just access that node and place + // that on the top of the stack. + auto Callee = TopNode->Callees.find_element( + [FId](const NodeIdPair &NR) { return NR.FId == FId; }); + if (Callee != nullptr) { + CHECK_NE(Callee->NodePtr, nullptr); + ShadowStack.AppendEmplace(TSC, Callee->NodePtr); + return; + } + + // This means we've never seen this stack before, create a new node here. + auto NewNode = + Nodes.AppendEmplace(TopNode, *NodeIdPairAllocator, 0, 0, FId); + if (UNLIKELY(NewNode == nullptr)) + return; + DCHECK_NE(NewNode, nullptr); + TopNode->Callees.AppendEmplace(NewNode, FId); + ShadowStack.AppendEmplace(TSC, NewNode); + DCHECK_NE(ShadowStack.back().NodePtr, nullptr); + return; + } + + void exitFunction(int32_t FId, uint64_t TSC) { + // When we exit a function, we look up the ShadowStack to see whether we've + // entered this function before. We do as little processing here as we can, + // since most of the hard work would have already been done at function + // entry. 
+ uint64_t CumulativeTreeTime = 0; + while (!ShadowStack.empty()) { + const auto &Top = ShadowStack.back(); + auto TopNode = Top.NodePtr; + DCHECK_NE(TopNode, nullptr); + auto LocalTime = TSC - Top.EntryTSC; + TopNode->CallCount++; + TopNode->CumulativeLocalTime += LocalTime - CumulativeTreeTime; + CumulativeTreeTime += LocalTime; + ShadowStack.trim(1); + + // TODO: Update the histogram for the node. + if (TopNode->FId == FId) + break; + } + } + + const RootArray &getRoots() const { return Roots; } + + // The deepCopyInto operation will update the provided FunctionCallTrie by + // re-creating the contents of this particular FunctionCallTrie in the other + // FunctionCallTrie. It will do this using a Depth First Traversal from the + // roots, and while doing so recreating the traversal in the provided + // FunctionCallTrie. + // + // This operation will *not* destroy the state in `O`, and thus may cause some + // duplicate entries in `O` if it is not empty. + // + // This function is *not* thread-safe, and may require external + // synchronisation of both "this" and |O|. + // + // This function must *not* be called with a non-empty FunctionCallTrie |O|. + void deepCopyInto(FunctionCallTrie &O) const { + DCHECK(O.getRoots().empty()); + + // We then push the root into a stack, to use as the parent marker for new + // nodes we push in as we're traversing depth-first down the call tree. + struct NodeAndParent { + FunctionCallTrie::Node *Node; + FunctionCallTrie::Node *NewNode; + }; + using Stack = Array<NodeAndParent>; + + typename Stack::AllocatorType StackAllocator( + profilingFlags()->stack_allocator_max); + Stack DFSStack(StackAllocator); + + for (const auto Root : getRoots()) { + // Add a node in O for this root. + auto NewRoot = O.Nodes.AppendEmplace( + nullptr, *O.NodeIdPairAllocator, Root->CallCount, + Root->CumulativeLocalTime, Root->FId); + + // Because we cannot allocate more memory we should bail out right away. 
+ if (UNLIKELY(NewRoot == nullptr)) + return; + + O.Roots.Append(NewRoot); + + // TODO: Figure out what to do if we fail to allocate any more stack + // space. Maybe warn or report once? + DFSStack.AppendEmplace(Root, NewRoot); + while (!DFSStack.empty()) { + NodeAndParent NP = DFSStack.back(); + DCHECK_NE(NP.Node, nullptr); + DCHECK_NE(NP.NewNode, nullptr); + DFSStack.trim(1); + for (const auto Callee : NP.Node->Callees) { + auto NewNode = O.Nodes.AppendEmplace( + NP.NewNode, *O.NodeIdPairAllocator, Callee.NodePtr->CallCount, + Callee.NodePtr->CumulativeLocalTime, Callee.FId); + if (UNLIKELY(NewNode == nullptr)) + return; + NP.NewNode->Callees.AppendEmplace(NewNode, Callee.FId); + DFSStack.AppendEmplace(Callee.NodePtr, NewNode); + } + } + } + } + + // The mergeInto operation will update the provided FunctionCallTrie by + // traversing the current trie's roots and updating (i.e. merging) the data in + // the nodes with the data in the target's nodes. If the node doesn't exist in + // the provided trie, we add a new one in the right position, and inherit the + // data from the original (current) trie, along with all its callees. + // + // This function is *not* thread-safe, and may require external + // synchronisation of both "this" and |O|. 
+ void mergeInto(FunctionCallTrie &O) const { + struct NodeAndTarget { + FunctionCallTrie::Node *OrigNode; + FunctionCallTrie::Node *TargetNode; + }; + using Stack = Array<NodeAndTarget>; + typename Stack::AllocatorType StackAllocator( + profilingFlags()->stack_allocator_max); + Stack DFSStack(StackAllocator); + + for (const auto Root : getRoots()) { + Node *TargetRoot = nullptr; + auto R = O.Roots.find_element( + [&](const Node *Node) { return Node->FId == Root->FId; }); + if (R == nullptr) { + TargetRoot = O.Nodes.AppendEmplace(nullptr, *O.NodeIdPairAllocator, 0, + 0, Root->FId); + if (UNLIKELY(TargetRoot == nullptr)) + return; + + O.Roots.Append(TargetRoot); + } else { + TargetRoot = *R; + } + + DFSStack.Append(NodeAndTarget{Root, TargetRoot}); + while (!DFSStack.empty()) { + NodeAndTarget NT = DFSStack.back(); + DCHECK_NE(NT.OrigNode, nullptr); + DCHECK_NE(NT.TargetNode, nullptr); + DFSStack.trim(1); + // TODO: Update the histogram as well when we have it ready. + NT.TargetNode->CallCount += NT.OrigNode->CallCount; + NT.TargetNode->CumulativeLocalTime += NT.OrigNode->CumulativeLocalTime; + for (const auto Callee : NT.OrigNode->Callees) { + auto TargetCallee = NT.TargetNode->Callees.find_element( + [&](const FunctionCallTrie::NodeIdPair &C) { + return C.FId == Callee.FId; + }); + if (TargetCallee == nullptr) { + auto NewTargetNode = O.Nodes.AppendEmplace( + NT.TargetNode, *O.NodeIdPairAllocator, 0, 0, Callee.FId); + + if (UNLIKELY(NewTargetNode == nullptr)) + return; + + TargetCallee = + NT.TargetNode->Callees.AppendEmplace(NewTargetNode, Callee.FId); + } + DFSStack.AppendEmplace(Callee.NodePtr, TargetCallee->NodePtr); + } + } + } + } +}; + +} // namespace __xray + +#endif // XRAY_FUNCTION_CALL_TRIE_H diff --git a/lib/xray/xray_init.cc b/lib/xray/xray_init.cc index 11892cb8b7a3..b4e069795195 100644 --- a/lib/xray/xray_init.cc +++ b/lib/xray/xray_init.cc @@ -38,32 +38,29 @@ using namespace __xray; // // FIXME: Support DSO instrumentation maps too. 
The current solution only works // for statically linked executables. -__sanitizer::atomic_uint8_t XRayInitialized{0}; +atomic_uint8_t XRayInitialized{0}; // This should always be updated before XRayInitialized is updated. -__sanitizer::SpinMutex XRayInstrMapMutex; +SpinMutex XRayInstrMapMutex; XRaySledMap XRayInstrMap; // Global flag to determine whether the flags have been initialized. -__sanitizer::atomic_uint8_t XRayFlagsInitialized{0}; +atomic_uint8_t XRayFlagsInitialized{0}; // A mutex to allow only one thread to initialize the XRay data structures. -__sanitizer::SpinMutex XRayInitMutex; +SpinMutex XRayInitMutex; // __xray_init() will do the actual loading of the current process' memory map // and then proceed to look for the .xray_instr_map section/segment. void __xray_init() XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayInitMutex); + SpinMutexLock Guard(&XRayInitMutex); // Short-circuit if we've already initialized XRay before. - if (__sanitizer::atomic_load(&XRayInitialized, - __sanitizer::memory_order_acquire)) + if (atomic_load(&XRayInitialized, memory_order_acquire)) return; - if (!__sanitizer::atomic_load(&XRayFlagsInitialized, - __sanitizer::memory_order_acquire)) { + if (!atomic_load(&XRayFlagsInitialized, memory_order_acquire)) { initializeFlags(); - __sanitizer::atomic_store(&XRayFlagsInitialized, true, - __sanitizer::memory_order_release); + atomic_store(&XRayFlagsInitialized, true, memory_order_release); } if (__start_xray_instr_map == nullptr) { @@ -73,14 +70,13 @@ void __xray_init() XRAY_NEVER_INSTRUMENT { } { - __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex); + SpinMutexLock Guard(&XRayInstrMapMutex); XRayInstrMap.Sleds = __start_xray_instr_map; XRayInstrMap.Entries = __stop_xray_instr_map - __start_xray_instr_map; XRayInstrMap.SledsIndex = __start_xray_fn_idx; XRayInstrMap.Functions = __stop_xray_fn_idx - __start_xray_fn_idx; } - __sanitizer::atomic_store(&XRayInitialized, true, - __sanitizer::memory_order_release); + 
atomic_store(&XRayInitialized, true, memory_order_release); #ifndef XRAY_NO_PREINIT if (flags()->patch_premain) @@ -88,7 +84,13 @@ void __xray_init() XRAY_NEVER_INSTRUMENT { #endif } -#if !defined(XRAY_NO_PREINIT) && SANITIZER_CAN_USE_PREINIT_ARRAY +// FIXME: Make check-xray tests work on FreeBSD without +// SANITIZER_CAN_USE_PREINIT_ARRAY. +// See sanitizer_internal_defs.h where the macro is defined. +// Calling unresolved PLT functions in .preinit_array can lead to deadlock on +// FreeBSD but here it seems benign. +#if !defined(XRAY_NO_PREINIT) && \ + (SANITIZER_CAN_USE_PREINIT_ARRAY || SANITIZER_FREEBSD) // Only add the preinit array initialization if the sanitizers can. __attribute__((section(".preinit_array"), used)) void (*__local_xray_preinit)(void) = __xray_init; diff --git a/lib/xray/xray_interface.cc b/lib/xray/xray_interface.cc index 766313e85c58..01bf6ddc607e 100644 --- a/lib/xray/xray_interface.cc +++ b/lib/xray/xray_interface.cc @@ -19,9 +19,12 @@ #include <cstdio> #include <errno.h> #include <limits> +#include <string.h> #include <sys/mman.h> +#include "sanitizer_common/sanitizer_addrhashmap.h" #include "sanitizer_common/sanitizer_common.h" + #include "xray_defs.h" #include "xray_flags.h" @@ -48,26 +51,40 @@ static const int16_t cSledLength = 8; #endif /* CPU architecture */ // This is the function to call when we encounter the entry or exit sleds. -__sanitizer::atomic_uintptr_t XRayPatchedFunction{0}; +atomic_uintptr_t XRayPatchedFunction{0}; // This is the function to call from the arg1-enabled sleds/trampolines. -__sanitizer::atomic_uintptr_t XRayArgLogger{0}; +atomic_uintptr_t XRayArgLogger{0}; // This is the function to call when we encounter a custom event log call. -__sanitizer::atomic_uintptr_t XRayPatchedCustomEvent{0}; +atomic_uintptr_t XRayPatchedCustomEvent{0}; + +// This is the function to call when we encounter a typed event log call. 
+atomic_uintptr_t XRayPatchedTypedEvent{0}; // This is the global status to determine whether we are currently // patching/unpatching. -__sanitizer::atomic_uint8_t XRayPatching{0}; +atomic_uint8_t XRayPatching{0}; + +struct TypeDescription { + uint32_t type_id; + std::size_t description_string_length; +}; + +using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>; +// An address map from immutable descriptors to type ids. +TypeDescriptorMapType TypeDescriptorAddressMap{}; -// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will undo -// any successful mprotect(...) changes. This is used to make a page writeable -// and executable, and upon destruction if it was successful in doing so returns -// the page into a read-only and executable page. +atomic_uint32_t TypeEventDescriptorCounter{0}; + +// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will +// undo any successful mprotect(...) changes. This is used to make a page +// writeable and executable, and upon destruction if it was successful in +// doing so returns the page into a read-only and executable page. // // This is only used specifically for runtime-patching of the XRay -// instrumentation points. This assumes that the executable pages are originally -// read-and-execute only. +// instrumentation points. This assumes that the executable pages are +// originally read-and-execute only. 
class MProtectHelper { void *PageAlignedAddr; std::size_t MProtectLen; @@ -116,6 +133,9 @@ bool patchSled(const XRaySledEntry &Sled, bool Enable, case XRayEntryType::CUSTOM_EVENT: Success = patchCustomEvent(Enable, FuncId, Sled); break; + case XRayEntryType::TYPED_EVENT: + Success = patchTypedEvent(Enable, FuncId, Sled); + break; default: Report("Unsupported sled kind '%d' @%04x\n", Sled.Address, int(Sled.Kind)); return false; @@ -125,19 +145,19 @@ bool patchSled(const XRaySledEntry &Sled, bool Enable, XRayPatchingStatus patchFunction(int32_t FuncId, bool Enable) XRAY_NEVER_INSTRUMENT { - if (!__sanitizer::atomic_load(&XRayInitialized, - __sanitizer::memory_order_acquire)) + if (!atomic_load(&XRayInitialized, + memory_order_acquire)) return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized. uint8_t NotPatching = false; - if (!__sanitizer::atomic_compare_exchange_strong( - &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel)) + if (!atomic_compare_exchange_strong( + &XRayPatching, &NotPatching, true, memory_order_acq_rel)) return XRayPatchingStatus::ONGOING; // Already patching. // Next, we look for the function index. XRaySledMap InstrMap; { - __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex); + SpinMutexLock Guard(&XRayInstrMapMutex); InstrMap = XRayInstrMap; } @@ -161,8 +181,8 @@ XRayPatchingStatus patchFunction(int32_t FuncId, while (f != e) SucceedOnce |= patchSled(*f++, Enable, FuncId); - __sanitizer::atomic_store(&XRayPatching, false, - __sanitizer::memory_order_release); + atomic_store(&XRayPatching, false, + memory_order_release); if (!SucceedOnce) { Report("Failed patching any sled for function '%d'.", FuncId); @@ -176,26 +196,26 @@ XRayPatchingStatus patchFunction(int32_t FuncId, // implementation. |Enable| defines whether we're enabling or disabling the // runtime XRay instrumentation. 
XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT { - if (!__sanitizer::atomic_load(&XRayInitialized, - __sanitizer::memory_order_acquire)) + if (!atomic_load(&XRayInitialized, + memory_order_acquire)) return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized. uint8_t NotPatching = false; - if (!__sanitizer::atomic_compare_exchange_strong( - &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel)) + if (!atomic_compare_exchange_strong( + &XRayPatching, &NotPatching, true, memory_order_acq_rel)) return XRayPatchingStatus::ONGOING; // Already patching. uint8_t PatchingSuccess = false; auto XRayPatchingStatusResetter = - __sanitizer::at_scope_exit([&PatchingSuccess] { + at_scope_exit([&PatchingSuccess] { if (!PatchingSuccess) - __sanitizer::atomic_store(&XRayPatching, false, - __sanitizer::memory_order_release); + atomic_store(&XRayPatching, false, + memory_order_release); }); XRaySledMap InstrMap; { - __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex); + SpinMutexLock Guard(&XRayInstrMapMutex); InstrMap = XRayInstrMap; } if (InstrMap.Entries == 0) @@ -251,8 +271,8 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT { } patchSled(Sled, Enable, FuncId); } - __sanitizer::atomic_store(&XRayPatching, false, - __sanitizer::memory_order_release); + atomic_store(&XRayPatching, false, + memory_order_release); PatchingSuccess = true; return XRayPatchingStatus::SUCCESS; } @@ -261,7 +281,7 @@ XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId, bool Enable) XRAY_NEVER_INSTRUMENT { XRaySledMap InstrMap; { - __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex); + SpinMutexLock Guard(&XRayInstrMapMutex); InstrMap = XRayInstrMap; } @@ -318,12 +338,12 @@ using namespace __xray; int __xray_set_handler(void (*entry)(int32_t, XRayEntryType)) XRAY_NEVER_INSTRUMENT { - if (__sanitizer::atomic_load(&XRayInitialized, - __sanitizer::memory_order_acquire)) { + if (atomic_load(&XRayInitialized, + memory_order_acquire)) { - 
__sanitizer::atomic_store(&__xray::XRayPatchedFunction, + atomic_store(&__xray::XRayPatchedFunction, reinterpret_cast<uintptr_t>(entry), - __sanitizer::memory_order_release); + memory_order_release); return 1; } return 0; @@ -331,11 +351,23 @@ int __xray_set_handler(void (*entry)(int32_t, int __xray_set_customevent_handler(void (*entry)(void *, size_t)) XRAY_NEVER_INSTRUMENT { - if (__sanitizer::atomic_load(&XRayInitialized, - __sanitizer::memory_order_acquire)) { - __sanitizer::atomic_store(&__xray::XRayPatchedCustomEvent, + if (atomic_load(&XRayInitialized, + memory_order_acquire)) { + atomic_store(&__xray::XRayPatchedCustomEvent, + reinterpret_cast<uintptr_t>(entry), + memory_order_release); + return 1; + } + return 0; +} + +int __xray_set_typedevent_handler(void (*entry)( + uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT { + if (atomic_load(&XRayInitialized, + memory_order_acquire)) { + atomic_store(&__xray::XRayPatchedTypedEvent, reinterpret_cast<uintptr_t>(entry), - __sanitizer::memory_order_release); + memory_order_release); return 1; } return 0; @@ -349,6 +381,21 @@ int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT { return __xray_set_customevent_handler(nullptr); } +int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT { + return __xray_set_typedevent_handler(nullptr); +} + +uint16_t __xray_register_event_type( + const char *const event_type) XRAY_NEVER_INSTRUMENT { + TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type); + if (h.created()) { + h->type_id = atomic_fetch_add( + &TypeEventDescriptorCounter, 1, memory_order_acq_rel); + h->description_string_length = strnlen(event_type, 1024); + } + return h->type_id; +} + XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT { return controlPatching(true); } @@ -367,22 +414,22 @@ __xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT { } int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) { - if 
(!__sanitizer::atomic_load(&XRayInitialized, - __sanitizer::memory_order_acquire)) + if (!atomic_load(&XRayInitialized, + memory_order_acquire)) return 0; // A relaxed write might not be visible even if the current thread gets // scheduled on a different CPU/NUMA node. We need to wait for everyone to // have this handler installed for consistency of collected data across CPUs. - __sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry), - __sanitizer::memory_order_release); + atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry), + memory_order_release); return 1; } int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); } uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex); + SpinMutexLock Guard(&XRayInstrMapMutex); if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions) return 0; return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function @@ -396,6 +443,6 @@ uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT { } size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex); + SpinMutexLock Guard(&XRayInstrMapMutex); return XRayInstrMap.Functions; } diff --git a/lib/xray/xray_interface_internal.h b/lib/xray/xray_interface_internal.h index 5811e2b7300a..8ca87457437e 100644 --- a/lib/xray/xray_interface_internal.h +++ b/lib/xray/xray_interface_internal.h @@ -43,8 +43,8 @@ struct XRaySledEntry { }; struct XRayFunctionSledIndex { - const XRaySledEntry* Begin; - const XRaySledEntry* End; + const XRaySledEntry *Begin; + const XRaySledEntry *End; }; } @@ -57,12 +57,13 @@ struct XRaySledMap { size_t Functions; }; -bool patchFunctionEntry(bool Enable, uint32_t FuncId, - const XRaySledEntry &Sled, void (*Trampoline)()); +bool patchFunctionEntry(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled, + void (*Trampoline)()); bool patchFunctionExit(bool Enable, 
uint32_t FuncId, const XRaySledEntry &Sled); bool patchFunctionTailExit(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled); bool patchCustomEvent(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled); +bool patchTypedEvent(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled); } // namespace __xray @@ -74,6 +75,7 @@ extern void __xray_FunctionExit(); extern void __xray_FunctionTailExit(); extern void __xray_ArgLoggerEntry(); extern void __xray_CustomEvent(); +extern void __xray_TypedEvent(); } #endif diff --git a/lib/xray/xray_log_interface.cc b/lib/xray/xray_log_interface.cc index 783f004d292a..0886fd0d1210 100644 --- a/lib/xray/xray_log_interface.cc +++ b/lib/xray/xray_log_interface.cc @@ -18,9 +18,20 @@ #include "xray/xray_interface.h" #include "xray_defs.h" -__sanitizer::SpinMutex XRayImplMutex; -XRayLogImpl CurrentXRayImpl{nullptr, nullptr, nullptr, nullptr}; -XRayLogImpl *GlobalXRayImpl = nullptr; +namespace __xray { +static SpinMutex XRayImplMutex; +static XRayLogImpl CurrentXRayImpl{nullptr, nullptr, nullptr, nullptr}; +static XRayLogImpl *GlobalXRayImpl = nullptr; + +// This is the default implementation of a buffer iterator, which always yields +// a null buffer. +XRayBuffer NullBufferIterator(XRayBuffer) XRAY_NEVER_INSTRUMENT { + return {nullptr, 0}; +} + +// This is the global function responsible for iterating through given buffers. +atomic_uintptr_t XRayBufferIterator{ + reinterpret_cast<uintptr_t>(&NullBufferIterator)}; // We use a linked list of Mode to XRayLogImpl mappings. 
This is a linked list // when it should be a map because we're avoiding having to depend on C++ @@ -31,9 +42,24 @@ struct ModeImpl { XRayLogImpl Impl; }; -ModeImpl SentinelModeImpl{ +static ModeImpl SentinelModeImpl{ nullptr, nullptr, {nullptr, nullptr, nullptr, nullptr}}; -ModeImpl *ModeImpls = &SentinelModeImpl; +static ModeImpl *ModeImpls = &SentinelModeImpl; +static const ModeImpl *CurrentMode = nullptr; + +} // namespace __xray + +using namespace __xray; + +void __xray_log_set_buffer_iterator(XRayBuffer (*Iterator)(XRayBuffer)) + XRAY_NEVER_INSTRUMENT { + atomic_store(&__xray::XRayBufferIterator, + reinterpret_cast<uintptr_t>(Iterator), memory_order_release); +} + +void __xray_log_remove_buffer_iterator() XRAY_NEVER_INSTRUMENT { + __xray_log_set_buffer_iterator(&NullBufferIterator); +} XRayLogRegisterStatus __xray_log_register_mode(const char *Mode, @@ -42,16 +68,15 @@ __xray_log_register_mode(const char *Mode, Impl.log_finalize == nullptr || Impl.log_init == nullptr) return XRayLogRegisterStatus::XRAY_INCOMPLETE_IMPL; - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); // First, look for whether the mode already has a registered implementation. 
for (ModeImpl *it = ModeImpls; it != &SentinelModeImpl; it = it->Next) { - if (!__sanitizer::internal_strcmp(Mode, it->Mode)) + if (!internal_strcmp(Mode, it->Mode)) return XRayLogRegisterStatus::XRAY_DUPLICATE_MODE; } - auto *NewModeImpl = - static_cast<ModeImpl *>(__sanitizer::InternalAlloc(sizeof(ModeImpl))); + auto *NewModeImpl = static_cast<ModeImpl *>(InternalAlloc(sizeof(ModeImpl))); NewModeImpl->Next = ModeImpls; - NewModeImpl->Mode = __sanitizer::internal_strdup(Mode); + NewModeImpl->Mode = internal_strdup(Mode); NewModeImpl->Impl = Impl; ModeImpls = NewModeImpl; return XRayLogRegisterStatus::XRAY_REGISTRATION_OK; @@ -59,9 +84,10 @@ __xray_log_register_mode(const char *Mode, XRayLogRegisterStatus __xray_log_select_mode(const char *Mode) XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); for (ModeImpl *it = ModeImpls; it != &SentinelModeImpl; it = it->Next) { - if (!__sanitizer::internal_strcmp(Mode, it->Mode)) { + if (!internal_strcmp(Mode, it->Mode)) { + CurrentMode = it; CurrentXRayImpl = it->Impl; GlobalXRayImpl = &CurrentXRayImpl; __xray_set_handler(it->Impl.handle_arg0); @@ -71,24 +97,32 @@ __xray_log_select_mode(const char *Mode) XRAY_NEVER_INSTRUMENT { return XRayLogRegisterStatus::XRAY_MODE_NOT_FOUND; } +const char *__xray_log_get_current_mode() XRAY_NEVER_INSTRUMENT { + SpinMutexLock Guard(&XRayImplMutex); + if (CurrentMode != nullptr) + return CurrentMode->Mode; + return nullptr; +} + void __xray_set_log_impl(XRayLogImpl Impl) XRAY_NEVER_INSTRUMENT { if (Impl.log_init == nullptr || Impl.log_finalize == nullptr || Impl.handle_arg0 == nullptr || Impl.flush_log == nullptr) { - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); GlobalXRayImpl = nullptr; + CurrentMode = nullptr; __xray_remove_handler(); __xray_remove_handler_arg1(); return; } - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); CurrentXRayImpl = 
Impl; GlobalXRayImpl = &CurrentXRayImpl; __xray_set_handler(Impl.handle_arg0); } void __xray_remove_log_impl() XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); GlobalXRayImpl = nullptr; __xray_remove_handler(); __xray_remove_handler_arg1(); @@ -97,22 +131,80 @@ void __xray_remove_log_impl() XRAY_NEVER_INSTRUMENT { XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers, void *Args, size_t ArgsSize) XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); if (!GlobalXRayImpl) return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; return GlobalXRayImpl->log_init(BufferSize, MaxBuffers, Args, ArgsSize); } +XRayLogInitStatus __xray_log_init_mode(const char *Mode, const char *Config) + XRAY_NEVER_INSTRUMENT { + SpinMutexLock Guard(&XRayImplMutex); + if (!GlobalXRayImpl) + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + + if (Config == nullptr) + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + + // Check first whether the current mode is the same as what we expect. + if (CurrentMode == nullptr || internal_strcmp(CurrentMode->Mode, Mode) != 0) + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + + // Here we do some work to coerce the pointer we're provided, so that + // the implementations that still take void* pointers can handle the + // data provided in the Config argument. + return GlobalXRayImpl->log_init( + 0, 0, const_cast<void *>(static_cast<const void *>(Config)), 0); +} + +XRayLogInitStatus +__xray_log_init_mode_bin(const char *Mode, const char *Config, + size_t ConfigSize) XRAY_NEVER_INSTRUMENT { + SpinMutexLock Guard(&XRayImplMutex); + if (!GlobalXRayImpl) + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + + if (Config == nullptr) + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + + // Check first whether the current mode is the same as what we expect. 
+ if (CurrentMode == nullptr || internal_strcmp(CurrentMode->Mode, Mode) != 0) + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + + // Here we do some work to coerce the pointer we're provided, so that + // the implementations that still take void* pointers can handle the + // data provided in the Config argument. + return GlobalXRayImpl->log_init( + 0, 0, const_cast<void *>(static_cast<const void *>(Config)), ConfigSize); +} + XRayLogInitStatus __xray_log_finalize() XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); if (!GlobalXRayImpl) return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; return GlobalXRayImpl->log_finalize(); } XRayLogFlushStatus __xray_log_flushLog() XRAY_NEVER_INSTRUMENT { - __sanitizer::SpinMutexLock Guard(&XRayImplMutex); + SpinMutexLock Guard(&XRayImplMutex); if (!GlobalXRayImpl) return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; return GlobalXRayImpl->flush_log(); } + +XRayLogFlushStatus __xray_log_process_buffers( + void (*Processor)(const char *, XRayBuffer)) XRAY_NEVER_INSTRUMENT { + // We want to make sure that there will be no changes to the global state for + // the log by synchronising on the XRayBufferIteratorMutex. + if (!GlobalXRayImpl) + return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; + auto Iterator = reinterpret_cast<XRayBuffer (*)(XRayBuffer)>( + atomic_load(&XRayBufferIterator, memory_order_acquire)); + auto Buffer = (*Iterator)(XRayBuffer{nullptr, 0}); + auto Mode = CurrentMode ? 
CurrentMode->Mode : nullptr; + while (Buffer.Data != nullptr) { + (*Processor)(Mode, Buffer); + Buffer = (*Iterator)(Buffer); + } + return XRayLogFlushStatus::XRAY_LOG_FLUSHED; +} diff --git a/lib/xray/xray_mips.cc b/lib/xray/xray_mips.cc index cd863304db29..6f8243828668 100644 --- a/lib/xray/xray_mips.cc +++ b/lib/xray/xray_mips.cc @@ -158,6 +158,12 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId, return false; } +bool patchTypedEvent(const bool Enable, const uint32_t FuncId, + const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { + // FIXME: Implement in mips? + return false; +} + } // namespace __xray extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT { diff --git a/lib/xray/xray_mips64.cc b/lib/xray/xray_mips64.cc index fa8fdd5abccc..f1bdf1d7d22d 100644 --- a/lib/xray/xray_mips64.cc +++ b/lib/xray/xray_mips64.cc @@ -166,6 +166,12 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId, // FIXME: Implement in mips64? return false; } + +bool patchTypedEvent(const bool Enable, const uint32_t FuncId, + const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { + // FIXME: Implement in mips64? + return false; +} } // namespace __xray extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT { diff --git a/lib/xray/xray_powerpc64.cc b/lib/xray/xray_powerpc64.cc index ab03cb10042f..5e4938361c0c 100644 --- a/lib/xray/xray_powerpc64.cc +++ b/lib/xray/xray_powerpc64.cc @@ -99,6 +99,12 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId, return false; } +bool patchTypedEvent(const bool Enable, const uint32_t FuncId, + const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { + // FIXME: Implement in powerpc64? 
+ return false; +} + } // namespace __xray extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT { diff --git a/lib/xray/xray_profile_collector.cc b/lib/xray/xray_profile_collector.cc new file mode 100644 index 000000000000..a43744d9a0cb --- /dev/null +++ b/lib/xray/xray_profile_collector.cc @@ -0,0 +1,318 @@ +//===-- xray_profile_collector.cc ------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// This implements the interface for the profileCollectorService. +// +//===----------------------------------------------------------------------===// +#include "xray_profile_collector.h" +#include "sanitizer_common/sanitizer_allocator_internal.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_vector.h" +#include "xray_profiling_flags.h" +#include <memory> +#include <pthread.h> +#include <utility> + +namespace __xray { +namespace profileCollectorService { + +namespace { + +SpinMutex GlobalMutex; +struct ThreadTrie { + tid_t TId; + FunctionCallTrie *Trie; +}; + +struct ProfileBuffer { + void *Data; + size_t Size; +}; + +struct BlockHeader { + u32 BlockSize; + u32 BlockNum; + u64 ThreadId; +}; + +// These need to be pointers that point to heap/internal-allocator-allocated +// objects because these are accessed even at program exit. 
+Vector<ThreadTrie> *ThreadTries = nullptr; +Vector<ProfileBuffer> *ProfileBuffers = nullptr; +FunctionCallTrie::Allocators *GlobalAllocators = nullptr; + +} // namespace + +void post(const FunctionCallTrie &T, tid_t TId) { + static pthread_once_t Once = PTHREAD_ONCE_INIT; + pthread_once(&Once, +[] { + SpinMutexLock Lock(&GlobalMutex); + GlobalAllocators = reinterpret_cast<FunctionCallTrie::Allocators *>( + InternalAlloc(sizeof(FunctionCallTrie::Allocators))); + new (GlobalAllocators) FunctionCallTrie::Allocators(); + *GlobalAllocators = FunctionCallTrie::InitAllocatorsCustom( + profilingFlags()->global_allocator_max); + ThreadTries = reinterpret_cast<Vector<ThreadTrie> *>( + InternalAlloc(sizeof(Vector<ThreadTrie>))); + new (ThreadTries) Vector<ThreadTrie>(); + ProfileBuffers = reinterpret_cast<Vector<ProfileBuffer> *>( + InternalAlloc(sizeof(Vector<ProfileBuffer>))); + new (ProfileBuffers) Vector<ProfileBuffer>(); + }); + DCHECK_NE(GlobalAllocators, nullptr); + DCHECK_NE(ThreadTries, nullptr); + DCHECK_NE(ProfileBuffers, nullptr); + + ThreadTrie *Item = nullptr; + { + SpinMutexLock Lock(&GlobalMutex); + if (GlobalAllocators == nullptr) + return; + + Item = ThreadTries->PushBack(); + Item->TId = TId; + + // Here we're using the internal allocator instead of the managed allocator + // because: + // + // 1) We're not using the segmented array data structure to host + // FunctionCallTrie objects. We're using a Vector (from sanitizer_common) + // which works like a std::vector<...> keeping elements contiguous in + // memory. The segmented array data structure assumes that elements are + // trivially destructible, where FunctionCallTrie isn't. + // + // 2) Using a managed allocator means we need to manage that separately, + // which complicates the nature of this code. 
To get around that, we're + // using the internal allocator instead, which has its own global state + // and is decoupled from the lifetime management required by the managed + // allocator we have in XRay. + // + Item->Trie = reinterpret_cast<FunctionCallTrie *>(InternalAlloc( + sizeof(FunctionCallTrie), nullptr, alignof(FunctionCallTrie))); + DCHECK_NE(Item->Trie, nullptr); + new (Item->Trie) FunctionCallTrie(*GlobalAllocators); + } + + T.deepCopyInto(*Item->Trie); +} + +// A PathArray represents the function id's representing a stack trace. In this +// context a path is almost always represented from the leaf function in a call +// stack to a root of the call trie. +using PathArray = Array<int32_t>; + +struct ProfileRecord { + using PathAllocator = typename PathArray::AllocatorType; + + // The Path in this record is the function id's from the leaf to the root of + // the function call stack as represented from a FunctionCallTrie. + PathArray *Path = nullptr; + const FunctionCallTrie::Node *Node = nullptr; + + // Constructor for in-place construction. + ProfileRecord(PathAllocator &A, const FunctionCallTrie::Node *N) + : Path([&] { + auto P = + reinterpret_cast<PathArray *>(InternalAlloc(sizeof(PathArray))); + new (P) PathArray(A); + return P; + }()), + Node(N) {} +}; + +namespace { + +using ProfileRecordArray = Array<ProfileRecord>; + +// Walk a depth-first traversal of each root of the FunctionCallTrie to generate +// the path(s) and the data associated with the path. 
+static void populateRecords(ProfileRecordArray &PRs, + ProfileRecord::PathAllocator &PA, + const FunctionCallTrie &Trie) { + using StackArray = Array<const FunctionCallTrie::Node *>; + using StackAllocator = typename StackArray::AllocatorType; + StackAllocator StackAlloc(profilingFlags()->stack_allocator_max); + StackArray DFSStack(StackAlloc); + for (const auto R : Trie.getRoots()) { + DFSStack.Append(R); + while (!DFSStack.empty()) { + auto Node = DFSStack.back(); + DFSStack.trim(1); + auto Record = PRs.AppendEmplace(PA, Node); + if (Record == nullptr) + return; + DCHECK_NE(Record, nullptr); + + // Traverse the Node's parents and as we're doing so, get the FIds in + // the order they appear. + for (auto N = Node; N != nullptr; N = N->Parent) + Record->Path->Append(N->FId); + DCHECK(!Record->Path->empty()); + + for (const auto C : Node->Callees) + DFSStack.Append(C.NodePtr); + } + } +} + +static void serializeRecords(ProfileBuffer *Buffer, const BlockHeader &Header, + const ProfileRecordArray &ProfileRecords) { + auto NextPtr = static_cast<char *>( + internal_memcpy(Buffer->Data, &Header, sizeof(Header))) + + sizeof(Header); + for (const auto &Record : ProfileRecords) { + // List of IDs follow: + for (const auto FId : *Record.Path) + NextPtr = + static_cast<char *>(internal_memcpy(NextPtr, &FId, sizeof(FId))) + + sizeof(FId); + + // Add the sentinel here. + constexpr int32_t SentinelFId = 0; + NextPtr = static_cast<char *>( + internal_memset(NextPtr, SentinelFId, sizeof(SentinelFId))) + + sizeof(SentinelFId); + + // Add the node data here. 
+ NextPtr = + static_cast<char *>(internal_memcpy(NextPtr, &Record.Node->CallCount, + sizeof(Record.Node->CallCount))) + + sizeof(Record.Node->CallCount); + NextPtr = static_cast<char *>( + internal_memcpy(NextPtr, &Record.Node->CumulativeLocalTime, + sizeof(Record.Node->CumulativeLocalTime))) + + sizeof(Record.Node->CumulativeLocalTime); + } + + DCHECK_EQ(NextPtr - static_cast<char *>(Buffer->Data), Buffer->Size); +} + +} // namespace + +void serialize() { + SpinMutexLock Lock(&GlobalMutex); + + // Clear out the global ProfileBuffers. + for (uptr I = 0; I < ProfileBuffers->Size(); ++I) + InternalFree((*ProfileBuffers)[I].Data); + ProfileBuffers->Reset(); + + if (ThreadTries->Size() == 0) + return; + + // Then repopulate the global ProfileBuffers. + for (u32 I = 0; I < ThreadTries->Size(); ++I) { + using ProfileRecordAllocator = typename ProfileRecordArray::AllocatorType; + ProfileRecordAllocator PRAlloc(profilingFlags()->global_allocator_max); + ProfileRecord::PathAllocator PathAlloc( + profilingFlags()->global_allocator_max); + ProfileRecordArray ProfileRecords(PRAlloc); + + // First, we want to compute the amount of space we're going to need. We'll + // use a local allocator and an __xray::Array<...> to store the intermediary + // data, then compute the size as we're going along. Then we'll allocate the + // contiguous space to contain the thread buffer data. + const auto &Trie = *(*ThreadTries)[I].Trie; + if (Trie.getRoots().empty()) + continue; + populateRecords(ProfileRecords, PathAlloc, Trie); + DCHECK(!Trie.getRoots().empty()); + DCHECK(!ProfileRecords.empty()); + + // Go through each record, to compute the sizes. 
+ // + // header size = block size (4 bytes) + // + block number (4 bytes) + // + thread id (8 bytes) + // record size = path ids (4 bytes * number of ids + sentinel 4 bytes) + // + call count (8 bytes) + // + local time (8 bytes) + // + end of record (8 bytes) + u32 CumulativeSizes = 0; + for (const auto &Record : ProfileRecords) + CumulativeSizes += 20 + (4 * Record.Path->size()); + + BlockHeader Header{16 + CumulativeSizes, I, (*ThreadTries)[I].TId}; + auto Buffer = ProfileBuffers->PushBack(); + Buffer->Size = sizeof(Header) + CumulativeSizes; + Buffer->Data = InternalAlloc(Buffer->Size, nullptr, 64); + DCHECK_NE(Buffer->Data, nullptr); + serializeRecords(Buffer, Header, ProfileRecords); + + // Now clean up the ProfileRecords array, one at a time. + for (auto &Record : ProfileRecords) { + Record.Path->~PathArray(); + InternalFree(Record.Path); + } + } +} + +void reset() { + SpinMutexLock Lock(&GlobalMutex); + if (ProfileBuffers != nullptr) { + // Clear out the profile buffers that have been serialized. + for (uptr I = 0; I < ProfileBuffers->Size(); ++I) + InternalFree((*ProfileBuffers)[I].Data); + ProfileBuffers->Reset(); + InternalFree(ProfileBuffers); + ProfileBuffers = nullptr; + } + + if (ThreadTries != nullptr) { + // Clear out the function call tries per thread. + for (uptr I = 0; I < ThreadTries->Size(); ++I) { + auto &T = (*ThreadTries)[I]; + T.Trie->~FunctionCallTrie(); + InternalFree(T.Trie); + } + ThreadTries->Reset(); + InternalFree(ThreadTries); + ThreadTries = nullptr; + } + + // Reset the global allocators. 
+ if (GlobalAllocators != nullptr) { + GlobalAllocators->~Allocators(); + InternalFree(GlobalAllocators); + GlobalAllocators = nullptr; + } + GlobalAllocators = reinterpret_cast<FunctionCallTrie::Allocators *>( + InternalAlloc(sizeof(FunctionCallTrie::Allocators))); + new (GlobalAllocators) FunctionCallTrie::Allocators(); + *GlobalAllocators = FunctionCallTrie::InitAllocators(); + ThreadTries = reinterpret_cast<Vector<ThreadTrie> *>( + InternalAlloc(sizeof(Vector<ThreadTrie>))); + new (ThreadTries) Vector<ThreadTrie>(); + ProfileBuffers = reinterpret_cast<Vector<ProfileBuffer> *>( + InternalAlloc(sizeof(Vector<ProfileBuffer>))); + new (ProfileBuffers) Vector<ProfileBuffer>(); +} + +XRayBuffer nextBuffer(XRayBuffer B) { + SpinMutexLock Lock(&GlobalMutex); + + if (ProfileBuffers == nullptr || ProfileBuffers->Size() == 0) + return {nullptr, 0}; + + if (B.Data == nullptr) + return {(*ProfileBuffers)[0].Data, (*ProfileBuffers)[0].Size}; + + BlockHeader Header; + internal_memcpy(&Header, B.Data, sizeof(BlockHeader)); + auto NextBlock = Header.BlockNum + 1; + if (NextBlock < ProfileBuffers->Size()) + return {(*ProfileBuffers)[NextBlock].Data, + (*ProfileBuffers)[NextBlock].Size}; + return {nullptr, 0}; +} + +} // namespace profileCollectorService +} // namespace __xray diff --git a/lib/xray/xray_profile_collector.h b/lib/xray/xray_profile_collector.h new file mode 100644 index 000000000000..335043db9526 --- /dev/null +++ b/lib/xray/xray_profile_collector.h @@ -0,0 +1,88 @@ +//===-- xray_profile_collector.h -------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// This file defines the interface for a data collection service, for XRay +// profiling. 
What we implement here is an in-process service where +// FunctionCallTrie instances can be handed off by threads, to be +// consolidated/collected. +// +//===----------------------------------------------------------------------===// +#ifndef XRAY_XRAY_PROFILE_COLLECTOR_H +#define XRAY_XRAY_PROFILE_COLLECTOR_H + +#include "xray_function_call_trie.h" + +#include "xray/xray_log_interface.h" + +namespace __xray { + +/// The ProfileCollectorService implements a centralised mechanism for +/// collecting FunctionCallTrie instances, indexed by thread ID. On demand, the +/// ProfileCollectorService can be queried for the most recent state of the +/// data, in a form that allows traversal. +namespace profileCollectorService { + +/// Posts the FunctionCallTrie associated with a specific Thread ID. This +/// will: +/// +/// - Make a copy of the FunctionCallTrie and store that against the Thread +/// ID. This will use the global allocator for the service-managed +/// FunctionCallTrie instances. +/// - Queue up a pointer to the FunctionCallTrie. +/// - If the queue is long enough (longer than some arbitrary threshold) we +/// then pre-calculate a single FunctionCallTrie for the whole process. +/// +/// +/// We are making a copy of the FunctionCallTrie because the intent is to have +/// this function be called at thread exit, or soon after the profiling +/// handler is finalized through the XRay APIs. By letting threads each +/// process their own thread-local FunctionCallTrie instances, we're removing +/// the need for synchronisation across threads while we're profiling. +/// However, once we're done profiling, we can then collect copies of these +/// FunctionCallTrie instances and pay the cost of the copy. +/// +/// NOTE: In the future, if this turns out to be more costly than "moving" the +/// FunctionCallTrie instances from the owning thread to the collector +/// service, then we can change the implementation to do it this way (moving) +/// instead. 
+void post(const FunctionCallTrie &T, tid_t TId); + +/// The serialize will process all FunctionCallTrie instances in memory, and +/// turn those into specifically formatted blocks, each describing the +/// function call trie's contents in a compact form. In memory, this looks +/// like the following layout: +/// +/// - block size (32 bits) +/// - block number (32 bits) +/// - thread id (64 bits) +/// - list of records: +/// - function ids in leaf to root order, terminated by +/// 0 (32 bits per function id) +/// - call count (64 bit) +/// - cumulative local time (64 bit) +/// - record delimiter (64 bit, 0x0) +/// +void serialize(); + +/// The reset function will clear out any internal memory held by the +/// service. The intent is to have the resetting be done in calls to the +/// initialization routine, or explicitly through the flush log API. +void reset(); + +/// This nextBuffer function is meant to implement the iterator functionality, +/// provided in the XRay API. +XRayBuffer nextBuffer(XRayBuffer B); + +} // namespace profileCollectorService + +} // namespace __xray + +#endif // XRAY_XRAY_PROFILE_COLLECTOR_H diff --git a/lib/xray/xray_profiling.cc b/lib/xray/xray_profiling.cc new file mode 100644 index 000000000000..786084c77226 --- /dev/null +++ b/lib/xray/xray_profiling.cc @@ -0,0 +1,372 @@ +//===-- xray_profiling.cc ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// This is the implementation of a profiling handler. 
+// +//===----------------------------------------------------------------------===// +#include <memory> +#include <time.h> + +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "xray/xray_interface.h" +#include "xray/xray_log_interface.h" + +#include "xray_flags.h" +#include "xray_profile_collector.h" +#include "xray_profiling_flags.h" +#include "xray_recursion_guard.h" +#include "xray_tsc.h" +#include "xray_utils.h" +#include <pthread.h> + +namespace __xray { + +namespace { + +constexpr uptr XRayProfilingVersion = 0x20180424; + +struct XRayProfilingFileHeader { + const u64 MagicBytes = 0x7872617970726f66; // Identifier for XRay profiling + // files 'xrayprof' in hex. + const uptr Version = XRayProfilingVersion; + uptr Timestamp = 0; // System time in nanoseconds. + uptr PID = 0; // Process ID. +}; + +atomic_sint32_t ProfilerLogFlushStatus = { + XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING}; + +atomic_sint32_t ProfilerLogStatus = {XRayLogInitStatus::XRAY_LOG_UNINITIALIZED}; + +SpinMutex ProfilerOptionsMutex; + +struct alignas(64) ProfilingData { + FunctionCallTrie::Allocators *Allocators = nullptr; + FunctionCallTrie *FCT = nullptr; +}; + +static pthread_key_t ProfilingKey; + +thread_local std::aligned_storage<sizeof(ProfilingData)>::type ThreadStorage{}; +static ProfilingData &getThreadLocalData() XRAY_NEVER_INSTRUMENT { + thread_local auto ThreadOnce = [] { + new (&ThreadStorage) ProfilingData{}; + pthread_setspecific(ProfilingKey, &ThreadStorage); + return false; + }(); + (void)ThreadOnce; + + auto &TLD = *reinterpret_cast<ProfilingData *>(&ThreadStorage); + + // We need to check whether the global flag to finalizing/finalized has been + // switched. If it is, then we ought to not actually initialise the data. 
+ auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire); + if (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING || + Status == XRayLogInitStatus::XRAY_LOG_FINALIZED) + return TLD; + + // If we're live, then we re-initialize TLD if the pointers are not null. + if (UNLIKELY(TLD.Allocators == nullptr && TLD.FCT == nullptr)) { + TLD.Allocators = reinterpret_cast<FunctionCallTrie::Allocators *>( + InternalAlloc(sizeof(FunctionCallTrie::Allocators))); + new (TLD.Allocators) FunctionCallTrie::Allocators(); + *TLD.Allocators = FunctionCallTrie::InitAllocators(); + TLD.FCT = reinterpret_cast<FunctionCallTrie *>( + InternalAlloc(sizeof(FunctionCallTrie))); + new (TLD.FCT) FunctionCallTrie(*TLD.Allocators); + } + + return TLD; +} + +static void cleanupTLD() XRAY_NEVER_INSTRUMENT { + auto &TLD = *reinterpret_cast<ProfilingData *>(&ThreadStorage); + if (TLD.Allocators != nullptr && TLD.FCT != nullptr) { + TLD.FCT->~FunctionCallTrie(); + TLD.Allocators->~Allocators(); + InternalFree(TLD.FCT); + InternalFree(TLD.Allocators); + TLD.FCT = nullptr; + TLD.Allocators = nullptr; + } +} + +} // namespace + +const char *profilingCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT { +#ifdef XRAY_PROFILER_DEFAULT_OPTIONS + return SANITIZER_STRINGIFY(XRAY_PROFILER_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + +atomic_sint32_t ProfileFlushStatus = { + XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING}; + +XRayLogFlushStatus profilingFlush() XRAY_NEVER_INSTRUMENT { + if (atomic_load(&ProfilerLogStatus, memory_order_acquire) != + XRayLogInitStatus::XRAY_LOG_FINALIZED) { + if (Verbosity()) + Report("Not flushing profiles, profiling not been finalized.\n"); + return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; + } + + s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING; + if (!atomic_compare_exchange_strong(&ProfilerLogFlushStatus, &Result, + XRayLogFlushStatus::XRAY_LOG_FLUSHING, + memory_order_acq_rel)) { + if (Verbosity()) + Report("Not flushing profiles, implementation still 
finalizing.\n"); + } + + // At this point, we'll create the file that will contain the profile, but + // only if the options say so. + if (!profilingFlags()->no_flush) { + // First check whether we have data in the profile collector service + // before we try and write anything down. + XRayBuffer B = profileCollectorService::nextBuffer({nullptr, 0}); + if (B.Data == nullptr) { + if (Verbosity()) + Report("profiling: No data to flush.\n"); + } else { + int Fd = getLogFD(); + if (Fd == -1) { + if (Verbosity()) + Report("profiling: Failed to flush to file, dropping data.\n"); + } else { + XRayProfilingFileHeader Header; + Header.Timestamp = NanoTime(); + Header.PID = internal_getpid(); + retryingWriteAll(Fd, reinterpret_cast<const char *>(&Header), + reinterpret_cast<const char *>(&Header) + + sizeof(Header)); + + // Now for each of the threads, write out the profile data as we would + // see it in memory, verbatim. + while (B.Data != nullptr && B.Size != 0) { + retryingWriteAll(Fd, reinterpret_cast<const char *>(B.Data), + reinterpret_cast<const char *>(B.Data) + B.Size); + B = profileCollectorService::nextBuffer(B); + } + // Then we close out the file. + internal_close(Fd); + } + } + } + + profileCollectorService::reset(); + + // Flush the current thread's local data structures as well. 
+ cleanupTLD(); + + atomic_store(&ProfilerLogStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED, + memory_order_release); + + return XRayLogFlushStatus::XRAY_LOG_FLUSHED; +} + +namespace { + +thread_local atomic_uint8_t ReentranceGuard{0}; + +static void postCurrentThreadFCT(ProfilingData &TLD) { + if (TLD.Allocators == nullptr || TLD.FCT == nullptr) + return; + + profileCollectorService::post(*TLD.FCT, GetTid()); + cleanupTLD(); +} + +} // namespace + +void profilingHandleArg0(int32_t FuncId, + XRayEntryType Entry) XRAY_NEVER_INSTRUMENT { + unsigned char CPU; + auto TSC = readTSC(CPU); + RecursionGuard G(ReentranceGuard); + if (!G) + return; + + auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire); + auto &TLD = getThreadLocalData(); + if (UNLIKELY(Status == XRayLogInitStatus::XRAY_LOG_FINALIZED || + Status == XRayLogInitStatus::XRAY_LOG_FINALIZING)) { + postCurrentThreadFCT(TLD); + return; + } + + switch (Entry) { + case XRayEntryType::ENTRY: + case XRayEntryType::LOG_ARGS_ENTRY: + TLD.FCT->enterFunction(FuncId, TSC); + break; + case XRayEntryType::EXIT: + case XRayEntryType::TAIL: + TLD.FCT->exitFunction(FuncId, TSC); + break; + default: + // FIXME: Handle bugs. + break; + } +} + +void profilingHandleArg1(int32_t FuncId, XRayEntryType Entry, + uint64_t) XRAY_NEVER_INSTRUMENT { + return profilingHandleArg0(FuncId, Entry); +} + +XRayLogInitStatus profilingFinalize() XRAY_NEVER_INSTRUMENT { + s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED; + if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus, + XRayLogInitStatus::XRAY_LOG_FINALIZING, + memory_order_release)) { + if (Verbosity()) + Report("Cannot finalize profile, the profiling is not initialized.\n"); + return static_cast<XRayLogInitStatus>(CurrentStatus); + } + + // Wait a grace period to allow threads to see that we're finalizing. 
+ SleepForMillis(profilingFlags()->grace_period_ms); + + // We also want to make sure that the current thread's data is cleaned up, + // if we have any. + auto &TLD = getThreadLocalData(); + postCurrentThreadFCT(TLD); + + // Then we force serialize the log data. + profileCollectorService::serialize(); + + atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED, + memory_order_release); + return XRayLogInitStatus::XRAY_LOG_FINALIZED; +} + +XRayLogInitStatus +profilingLoggingInit(size_t BufferSize, size_t BufferMax, void *Options, + size_t OptionsSize) XRAY_NEVER_INSTRUMENT { + if (BufferSize != 0 || BufferMax != 0) { + if (Verbosity()) + Report("__xray_log_init() being used, and is unsupported. Use " + "__xray_log_init_mode(...) instead. Bailing out."); + return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + } + + s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED; + if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus, + XRayLogInitStatus::XRAY_LOG_INITIALIZING, + memory_order_release)) { + if (Verbosity()) + Report("Cannot initialize already initialised profiling " + "implementation.\n"); + return static_cast<XRayLogInitStatus>(CurrentStatus); + } + + { + SpinMutexLock Lock(&ProfilerOptionsMutex); + FlagParser ConfigParser; + ProfilerFlags Flags; + Flags.setDefaults(); + registerProfilerFlags(&ConfigParser, &Flags); + ConfigParser.ParseString(profilingCompilerDefinedFlags()); + const char *Env = GetEnv("XRAY_PROFILING_OPTIONS"); + if (Env == nullptr) + Env = ""; + ConfigParser.ParseString(Env); + + // Then parse the configuration string provided. + ConfigParser.ParseString(static_cast<const char *>(Options)); + if (Verbosity()) + ReportUnrecognizedFlags(); + *profilingFlags() = Flags; + } + + // We need to reset the profile data collection implementation now. + profileCollectorService::reset(); + + // We need to set up the exit handlers. 
+ static pthread_once_t Once = PTHREAD_ONCE_INIT; + pthread_once(&Once, +[] { + pthread_key_create(&ProfilingKey, +[](void *P) { + // This is the thread-exit handler. + auto &TLD = *reinterpret_cast<ProfilingData *>(P); + if (TLD.Allocators == nullptr && TLD.FCT == nullptr) + return; + + postCurrentThreadFCT(TLD); + }); + + // We also need to set up an exit handler, so that we can get the profile + // information at exit time. We use the C API to do this, to not rely on C++ + // ABI functions for registering exit handlers. + Atexit(+[] { + // Finalize and flush. + if (profilingFinalize() != XRAY_LOG_FINALIZED) { + cleanupTLD(); + return; + } + if (profilingFlush() != XRAY_LOG_FLUSHED) { + cleanupTLD(); + return; + } + if (Verbosity()) + Report("XRay Profile flushed at exit."); + }); + }); + + __xray_log_set_buffer_iterator(profileCollectorService::nextBuffer); + __xray_set_handler(profilingHandleArg0); + __xray_set_handler_arg1(profilingHandleArg1); + + atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED, + memory_order_release); + if (Verbosity()) + Report("XRay Profiling init successful.\n"); + + return XRayLogInitStatus::XRAY_LOG_INITIALIZED; +} + +bool profilingDynamicInitializer() XRAY_NEVER_INSTRUMENT { + // Set up the flag defaults from the static defaults and the + // compiler-provided defaults. 
+ { + SpinMutexLock Lock(&ProfilerOptionsMutex); + auto *F = profilingFlags(); + F->setDefaults(); + FlagParser ProfilingParser; + registerProfilerFlags(&ProfilingParser, F); + ProfilingParser.ParseString(profilingCompilerDefinedFlags()); + } + + XRayLogImpl Impl{ + profilingLoggingInit, + profilingFinalize, + profilingHandleArg0, + profilingFlush, + }; + auto RegistrationResult = __xray_log_register_mode("xray-profiling", Impl); + if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) { + if (Verbosity()) + Report("Cannot register XRay Profiling mode to 'xray-profiling'; error = " + "%d\n", + RegistrationResult); + return false; + } + + if (!internal_strcmp(flags()->xray_mode, "xray-profiling")) + __xray_log_select_mode("xray_profiling"); + return true; +} + +} // namespace __xray + +static auto UNUSED Unused = __xray::profilingDynamicInitializer(); diff --git a/lib/xray/xray_profiling_flags.cc b/lib/xray/xray_profiling_flags.cc new file mode 100644 index 000000000000..593e66a78ad2 --- /dev/null +++ b/lib/xray/xray_profiling_flags.cc @@ -0,0 +1,40 @@ +//===-- xray_flags.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// XRay runtime flags. +//===----------------------------------------------------------------------===// + +#include "xray_profiling_flags.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "xray_defs.h" + +namespace __xray { + +// Storage for the profiling flags. 
+ProfilerFlags xray_profiling_flags_dont_use_directly; + +void ProfilerFlags::setDefaults() XRAY_NEVER_INSTRUMENT { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "xray_profiling_flags.inc" +#undef XRAY_FLAG +} + +void registerProfilerFlags(FlagParser *P, + ProfilerFlags *F) XRAY_NEVER_INSTRUMENT { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(P, #Name, Description, &F->Name); +#include "xray_profiling_flags.inc" +#undef XRAY_FLAG +} + +} // namespace __xray diff --git a/lib/xray/xray_profiling_flags.h b/lib/xray/xray_profiling_flags.h new file mode 100644 index 000000000000..2f9a7514799a --- /dev/null +++ b/lib/xray/xray_profiling_flags.h @@ -0,0 +1,39 @@ +//===-- xray_profiling_flags.h ----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// XRay profiling runtime flags. 
+//===----------------------------------------------------------------------===// + +#ifndef XRAY_PROFILER_FLAGS_H +#define XRAY_PROFILER_FLAGS_H + +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_internal_defs.h" + +namespace __xray { + +struct ProfilerFlags { +#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "xray_profiling_flags.inc" +#undef XRAY_FLAG + + void setDefaults(); +}; + +extern ProfilerFlags xray_profiling_flags_dont_use_directly; +inline ProfilerFlags *profilingFlags() { + return &xray_profiling_flags_dont_use_directly; +} +void registerProfilerFlags(FlagParser *P, ProfilerFlags *F); + +} // namespace __xray + +#endif // XRAY_PROFILER_FLAGS_H diff --git a/lib/xray/xray_profiling_flags.inc b/lib/xray/xray_profiling_flags.inc new file mode 100644 index 000000000000..e9230ae64187 --- /dev/null +++ b/lib/xray/xray_profiling_flags.inc @@ -0,0 +1,29 @@ +//===-- xray_profiling_flags.inc --------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// XRay profiling runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef XRAY_FLAG +#error "Define XRAY_FLAG prior to including this file!" +#endif + +XRAY_FLAG(uptr, per_thread_allocator_max, 2 << 20, + "Maximum size of any single per-thread allocator.") +XRAY_FLAG(uptr, global_allocator_max, 2 << 24, + "Maximum size of the global allocator for profile storage.") +XRAY_FLAG(uptr, stack_allocator_max, 2 << 20, + "Maximum size of the traversal stack allocator.") +XRAY_FLAG(int, grace_period_ms, 1, + "Profile collection will wait this much time in milliseconds before " + "resetting the global state. 
This gives a chance to threads to " + "notice that the profiler has been finalized and clean up.") +XRAY_FLAG(bool, no_flush, false, + "Set to true if we want the profiling implementation to not write " + "out files.") diff --git a/lib/xray/xray_recursion_guard.h b/lib/xray/xray_recursion_guard.h new file mode 100644 index 000000000000..6edadea563bc --- /dev/null +++ b/lib/xray/xray_recursion_guard.h @@ -0,0 +1,57 @@ +//===-- xray_recursion_guard.h ---------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +//===----------------------------------------------------------------------===// +#ifndef XRAY_XRAY_RECURSION_GUARD_H +#define XRAY_XRAY_RECURSION_GUARD_H + +#include "sanitizer_common/sanitizer_atomic.h" + +namespace __xray { + +/// The RecursionGuard is useful for guarding against signal handlers which are +/// also potentially calling XRay-instrumented functions. To use the +/// RecursionGuard, you'll typically need a thread_local atomic_uint8_t: +/// +/// thread_local atomic_uint8_t Guard{0}; +/// +/// // In a handler function: +/// void handleArg0(int32_t F, XRayEntryType T) { +/// RecursionGuard G(Guard); +/// if (!G) +/// return; // Failed to acquire the guard. +/// ... 
+/// } +/// +class RecursionGuard { + atomic_uint8_t &Running; + const bool Valid; + +public: + explicit inline RecursionGuard(atomic_uint8_t &R) + : Running(R), Valid(!atomic_exchange(&R, 1, memory_order_acq_rel)) {} + + inline RecursionGuard(const RecursionGuard &) = delete; + inline RecursionGuard(RecursionGuard &&) = delete; + inline RecursionGuard &operator=(const RecursionGuard &) = delete; + inline RecursionGuard &operator=(RecursionGuard &&) = delete; + + explicit inline operator bool() const { return Valid; } + + inline ~RecursionGuard() noexcept { + if (Valid) + atomic_store(&Running, 0, memory_order_release); + } +}; + +} // namespace __xray + +#endif // XRAY_XRAY_RECURSION_GUARD_H diff --git a/lib/xray/xray_segmented_array.h b/lib/xray/xray_segmented_array.h new file mode 100644 index 000000000000..11dd794fa520 --- /dev/null +++ b/lib/xray/xray_segmented_array.h @@ -0,0 +1,375 @@ +//===-- xray_segmented_array.h ---------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of XRay, a dynamic runtime instrumentation system. +// +// Defines the implementation of a segmented array, with fixed-size segments +// backing the segments. +// +//===----------------------------------------------------------------------===// +#ifndef XRAY_SEGMENTED_ARRAY_H +#define XRAY_SEGMENTED_ARRAY_H + +#include "sanitizer_common/sanitizer_allocator.h" +#include "xray_allocator.h" +#include "xray_utils.h" +#include <cassert> +#include <type_traits> +#include <utility> + +namespace __xray { + +/// The Array type provides an interface similar to std::vector<...> but does +/// not shrink in size. Once constructed, elements can be appended but cannot be +/// removed. 
The implementation is heavily dependent on the contract provided by +/// the Allocator type, in that all memory will be released when the Allocator +/// is destroyed. When an Array is destroyed, it will destroy elements in the +/// backing store but will not free the memory. +template <class T> class Array { + struct SegmentBase { + SegmentBase *Prev; + SegmentBase *Next; + }; + + // We want each segment of the array to be cache-line aligned, and elements of + // the array be offset from the beginning of the segment. + struct Segment : SegmentBase { + char Data[1]; + }; + +public: + // Each segment of the array will be laid out with the following assumptions: + // + // - Each segment will be on a cache-line address boundary (kCacheLineSize + // aligned). + // + // - The elements will be accessed through an aligned pointer, dependent on + // the alignment of T. + // + // - Each element is at least two-pointers worth from the beginning of the + // Segment, aligned properly, and the rest of the elements are accessed + // through appropriate alignment. + // + // We then compute the size of the segment to follow this logic: + // + // - Compute the number of elements that can fit within + // kCacheLineSize-multiple segments, minus the size of two pointers. + // + // - Request cacheline-multiple sized elements from the allocator. 
+ static constexpr size_t AlignedElementStorageSize = + sizeof(typename std::aligned_storage<sizeof(T), alignof(T)>::type); + + static constexpr size_t SegmentSize = + nearest_boundary(sizeof(Segment) + next_pow2(sizeof(T)), kCacheLineSize); + + using AllocatorType = Allocator<SegmentSize>; + + static constexpr size_t ElementsPerSegment = + (SegmentSize - sizeof(Segment)) / next_pow2(sizeof(T)); + + static_assert(ElementsPerSegment > 0, + "Must have at least 1 element per segment."); + + static SegmentBase SentinelSegment; + +private: + AllocatorType *Alloc; + SegmentBase *Head = &SentinelSegment; + SegmentBase *Tail = &SentinelSegment; + size_t Size = 0; + + // Here we keep track of segments in the freelist, to allow us to re-use + // segments when elements are trimmed off the end. + SegmentBase *Freelist = &SentinelSegment; + + Segment *NewSegment() { + // We need to handle the case in which enough elements have been trimmed to + // allow us to re-use segments we've allocated before. For this we look into + // the Freelist, to see whether we need to actually allocate new blocks or + // just re-use blocks we've already seen before. + if (Freelist != &SentinelSegment) { + auto *FreeSegment = Freelist; + Freelist = FreeSegment->Next; + FreeSegment->Next = &SentinelSegment; + Freelist->Prev = &SentinelSegment; + return static_cast<Segment *>(FreeSegment); + } + + auto SegmentBlock = Alloc->Allocate(); + if (SegmentBlock.Data == nullptr) + return nullptr; + + // Placement-new the Segment element at the beginning of the SegmentBlock. 
+ auto S = reinterpret_cast<Segment *>(SegmentBlock.Data); + new (S) SegmentBase{&SentinelSegment, &SentinelSegment}; + return S; + } + + Segment *InitHeadAndTail() { + DCHECK_EQ(Head, &SentinelSegment); + DCHECK_EQ(Tail, &SentinelSegment); + auto Segment = NewSegment(); + if (Segment == nullptr) + return nullptr; + DCHECK_EQ(Segment->Next, &SentinelSegment); + DCHECK_EQ(Segment->Prev, &SentinelSegment); + Head = Tail = static_cast<SegmentBase *>(Segment); + return Segment; + } + + Segment *AppendNewSegment() { + auto S = NewSegment(); + if (S == nullptr) + return nullptr; + DCHECK_NE(Tail, &SentinelSegment); + DCHECK_EQ(Tail->Next, &SentinelSegment); + DCHECK_EQ(S->Prev, &SentinelSegment); + DCHECK_EQ(S->Next, &SentinelSegment); + Tail->Next = S; + S->Prev = Tail; + Tail = S; + return static_cast<Segment *>(Tail); + } + + // This Iterator models a BidirectionalIterator. + template <class U> class Iterator { + SegmentBase *S = &SentinelSegment; + size_t Offset = 0; + size_t Size = 0; + + public: + Iterator(SegmentBase *IS, size_t Off, size_t S) + : S(IS), Offset(Off), Size(S) {} + Iterator(const Iterator &) noexcept = default; + Iterator() noexcept = default; + Iterator(Iterator &&) noexcept = default; + Iterator &operator=(const Iterator &) = default; + Iterator &operator=(Iterator &&) = default; + ~Iterator() = default; + + Iterator &operator++() { + if (++Offset % ElementsPerSegment || Offset == Size) + return *this; + + // At this point, we know that Offset % N == 0, so we must advance the + // segment pointer. 
+ DCHECK_EQ(Offset % ElementsPerSegment, 0); + DCHECK_NE(Offset, Size); + DCHECK_NE(S, &SentinelSegment); + DCHECK_NE(S->Next, &SentinelSegment); + S = S->Next; + DCHECK_NE(S, &SentinelSegment); + return *this; + } + + Iterator &operator--() { + DCHECK_NE(S, &SentinelSegment); + DCHECK_GT(Offset, 0); + + auto PreviousOffset = Offset--; + if (PreviousOffset != Size && PreviousOffset % ElementsPerSegment == 0) { + DCHECK_NE(S->Prev, &SentinelSegment); + S = S->Prev; + } + + return *this; + } + + Iterator operator++(int) { + Iterator Copy(*this); + ++(*this); + return Copy; + } + + Iterator operator--(int) { + Iterator Copy(*this); + --(*this); + return Copy; + } + + template <class V, class W> + friend bool operator==(const Iterator<V> &L, const Iterator<W> &R) { + return L.S == R.S && L.Offset == R.Offset; + } + + template <class V, class W> + friend bool operator!=(const Iterator<V> &L, const Iterator<W> &R) { + return !(L == R); + } + + U &operator*() const { + DCHECK_NE(S, &SentinelSegment); + auto RelOff = Offset % ElementsPerSegment; + + // We need to compute the character-aligned pointer, offset from the + // segment's Data location to get the element in the position of Offset. 
+ auto Base = static_cast<Segment *>(S)->Data; + auto AlignedOffset = Base + (RelOff * AlignedElementStorageSize); + return *reinterpret_cast<U *>(AlignedOffset); + } + + U *operator->() const { return &(**this); } + }; + +public: + explicit Array(AllocatorType &A) : Alloc(&A) {} + + Array(const Array &) = delete; + Array(Array &&O) NOEXCEPT : Alloc(O.Alloc), + Head(O.Head), + Tail(O.Tail), + Size(O.Size) { + O.Head = &SentinelSegment; + O.Tail = &SentinelSegment; + O.Size = 0; + } + + bool empty() const { return Size == 0; } + + AllocatorType &allocator() const { + DCHECK_NE(Alloc, nullptr); + return *Alloc; + } + + size_t size() const { return Size; } + + T *Append(const T &E) { + if (UNLIKELY(Head == &SentinelSegment)) + if (InitHeadAndTail() == nullptr) + return nullptr; + + auto Offset = Size % ElementsPerSegment; + if (UNLIKELY(Size != 0 && Offset == 0)) + if (AppendNewSegment() == nullptr) + return nullptr; + + auto Base = static_cast<Segment *>(Tail)->Data; + auto AlignedOffset = Base + (Offset * AlignedElementStorageSize); + auto Position = reinterpret_cast<T *>(AlignedOffset); + *Position = E; + ++Size; + return Position; + } + + template <class... Args> T *AppendEmplace(Args &&... args) { + if (UNLIKELY(Head == &SentinelSegment)) + if (InitHeadAndTail() == nullptr) + return nullptr; + + auto Offset = Size % ElementsPerSegment; + auto *LatestSegment = Tail; + if (UNLIKELY(Size != 0 && Offset == 0)) { + LatestSegment = AppendNewSegment(); + if (LatestSegment == nullptr) + return nullptr; + } + + DCHECK_NE(Tail, &SentinelSegment); + auto Base = static_cast<Segment *>(LatestSegment)->Data; + auto AlignedOffset = Base + (Offset * AlignedElementStorageSize); + auto Position = reinterpret_cast<T *>(AlignedOffset); + + // In-place construct at Position. 
+ new (Position) T{std::forward<Args>(args)...}; + ++Size; + return reinterpret_cast<T *>(Position); + } + + T &operator[](size_t Offset) const { + DCHECK_LE(Offset, Size); + // We need to traverse the array enough times to find the element at Offset. + auto S = Head; + while (Offset >= ElementsPerSegment) { + S = S->Next; + Offset -= ElementsPerSegment; + DCHECK_NE(S, &SentinelSegment); + } + auto Base = static_cast<Segment *>(S)->Data; + auto AlignedOffset = Base + (Offset * AlignedElementStorageSize); + auto Position = reinterpret_cast<T *>(AlignedOffset); + return *reinterpret_cast<T *>(Position); + } + + T &front() const { + DCHECK_NE(Head, &SentinelSegment); + DCHECK_NE(Size, 0u); + return *begin(); + } + + T &back() const { + DCHECK_NE(Tail, &SentinelSegment); + DCHECK_NE(Size, 0u); + auto It = end(); + --It; + return *It; + } + + template <class Predicate> T *find_element(Predicate P) const { + if (empty()) + return nullptr; + + auto E = end(); + for (auto I = begin(); I != E; ++I) + if (P(*I)) + return &(*I); + + return nullptr; + } + + /// Remove N Elements from the end. This leaves the blocks behind, and not + /// require allocation of new blocks for new elements added after trimming. + void trim(size_t Elements) { + DCHECK_LE(Elements, Size); + DCHECK_GT(Size, 0); + auto OldSize = Size; + Size -= Elements; + + DCHECK_NE(Head, &SentinelSegment); + DCHECK_NE(Tail, &SentinelSegment); + + for (auto SegmentsToTrim = (nearest_boundary(OldSize, ElementsPerSegment) - + nearest_boundary(Size, ElementsPerSegment)) / + ElementsPerSegment; + SegmentsToTrim > 0; --SegmentsToTrim) { + DCHECK_NE(Head, &SentinelSegment); + DCHECK_NE(Tail, &SentinelSegment); + // Put the tail into the Freelist. 
+ auto *FreeSegment = Tail; + Tail = Tail->Prev; + if (Tail == &SentinelSegment) + Head = Tail; + else + Tail->Next = &SentinelSegment; + + DCHECK_EQ(Tail->Next, &SentinelSegment); + FreeSegment->Next = Freelist; + FreeSegment->Prev = &SentinelSegment; + if (Freelist != &SentinelSegment) + Freelist->Prev = FreeSegment; + Freelist = FreeSegment; + } + } + + // Provide iterators. + Iterator<T> begin() const { return Iterator<T>(Head, 0, Size); } + Iterator<T> end() const { return Iterator<T>(Tail, Size, Size); } + Iterator<const T> cbegin() const { return Iterator<const T>(Head, 0, Size); } + Iterator<const T> cend() const { return Iterator<const T>(Tail, Size, Size); } +}; + +// We need to have this storage definition out-of-line so that the compiler can +// ensure that storage for the SentinelSegment is defined and has a single +// address. +template <class T> +typename Array<T>::SegmentBase Array<T>::SentinelSegment{ + &Array<T>::SentinelSegment, &Array<T>::SentinelSegment}; + +} // namespace __xray + +#endif // XRAY_SEGMENTED_ARRAY_H diff --git a/lib/xray/xray_trampoline_x86_64.S b/lib/xray/xray_trampoline_x86_64.S index 350afd9265fd..99ad3966ee3a 100644 --- a/lib/xray/xray_trampoline_x86_64.S +++ b/lib/xray/xray_trampoline_x86_64.S @@ -19,47 +19,56 @@ .macro SAVE_REGISTERS - subq $192, %rsp - CFI_DEF_CFA_OFFSET(200) - // At this point, the stack pointer should be aligned to an 8-byte boundary, - // because any call instructions that come after this will add another 8 - // bytes and therefore align it to 16-bytes. 
- movq %rbp, 184(%rsp) - movupd %xmm0, 168(%rsp) - movupd %xmm1, 152(%rsp) - movupd %xmm2, 136(%rsp) - movupd %xmm3, 120(%rsp) - movupd %xmm4, 104(%rsp) - movupd %xmm5, 88(%rsp) - movupd %xmm6, 72(%rsp) - movupd %xmm7, 56(%rsp) - movq %rdi, 48(%rsp) - movq %rax, 40(%rsp) - movq %rdx, 32(%rsp) - movq %rsi, 24(%rsp) - movq %rcx, 16(%rsp) - movq %r8, 8(%rsp) - movq %r9, 0(%rsp) + subq $240, %rsp + CFI_DEF_CFA_OFFSET(248) + movq %rbp, 232(%rsp) + movupd %xmm0, 216(%rsp) + movupd %xmm1, 200(%rsp) + movupd %xmm2, 184(%rsp) + movupd %xmm3, 168(%rsp) + movupd %xmm4, 152(%rsp) + movupd %xmm5, 136(%rsp) + movupd %xmm6, 120(%rsp) + movupd %xmm7, 104(%rsp) + movq %rdi, 96(%rsp) + movq %rax, 88(%rsp) + movq %rdx, 80(%rsp) + movq %rsi, 72(%rsp) + movq %rcx, 64(%rsp) + movq %r8, 56(%rsp) + movq %r9, 48(%rsp) + movq %r10, 40(%rsp) + movq %r11, 32(%rsp) + movq %r12, 24(%rsp) + movq %r13, 16(%rsp) + movq %r14, 8(%rsp) + movq %r15, 0(%rsp) .endm .macro RESTORE_REGISTERS - movq 184(%rsp), %rbp - movupd 168(%rsp), %xmm0 - movupd 152(%rsp), %xmm1 - movupd 136(%rsp), %xmm2 - movupd 120(%rsp), %xmm3 - movupd 104(%rsp), %xmm4 - movupd 88(%rsp), %xmm5 - movupd 72(%rsp) , %xmm6 - movupd 56(%rsp) , %xmm7 - movq 48(%rsp), %rdi - movq 40(%rsp), %rax - movq 32(%rsp), %rdx - movq 24(%rsp), %rsi - movq 16(%rsp), %rcx - movq 8(%rsp), %r8 - movq 0(%rsp), %r9 - addq $192, %rsp + movq 232(%rsp), %rbp + movupd 216(%rsp), %xmm0 + movupd 200(%rsp), %xmm1 + movupd 184(%rsp), %xmm2 + movupd 168(%rsp), %xmm3 + movupd 152(%rsp), %xmm4 + movupd 136(%rsp), %xmm5 + movupd 120(%rsp) , %xmm6 + movupd 104(%rsp) , %xmm7 + movq 96(%rsp), %rdi + movq 88(%rsp), %rax + movq 80(%rsp), %rdx + movq 72(%rsp), %rsi + movq 64(%rsp), %rcx + movq 56(%rsp), %r8 + movq 48(%rsp), %r9 + movq 40(%rsp), %r10 + movq 32(%rsp), %r11 + movq 24(%rsp), %r12 + movq 16(%rsp), %r13 + movq 8(%rsp), %r14 + movq 0(%rsp), %r15 + addq $240, %rsp CFI_DEF_CFA_OFFSET(8) .endm @@ -90,6 +99,7 @@ .globl ASM_SYMBOL(__xray_FunctionEntry) .align 16, 0x90 
ASM_TYPE_FUNCTION(__xray_FunctionEntry) +# LLVM-MCA-BEGIN __xray_FunctionEntry ASM_SYMBOL(__xray_FunctionEntry): CFI_STARTPROC SAVE_REGISTERS @@ -100,7 +110,7 @@ ASM_SYMBOL(__xray_FunctionEntry): testq %rax, %rax je .Ltmp0 - // The patched function prolog puts its xray_instr_map index into %r10d. + // The patched function prologue puts its xray_instr_map index into %r10d. movl %r10d, %edi xor %esi,%esi ALIGNED_CALL_RAX @@ -108,6 +118,7 @@ ASM_SYMBOL(__xray_FunctionEntry): .Ltmp0: RESTORE_REGISTERS retq +# LLVM-MCA-END ASM_SIZE(__xray_FunctionEntry) CFI_ENDPROC @@ -116,6 +127,7 @@ ASM_SYMBOL(__xray_FunctionEntry): .globl ASM_SYMBOL(__xray_FunctionExit) .align 16, 0x90 ASM_TYPE_FUNCTION(__xray_FunctionExit) +# LLVM-MCA-BEGIN __xray_FunctionExit ASM_SYMBOL(__xray_FunctionExit): CFI_STARTPROC // Save the important registers first. Since we're assuming that this @@ -146,6 +158,7 @@ ASM_SYMBOL(__xray_FunctionExit): addq $56, %rsp CFI_DEF_CFA_OFFSET(8) retq +# LLVM-MCA-END ASM_SIZE(__xray_FunctionExit) CFI_ENDPROC @@ -154,6 +167,7 @@ ASM_SYMBOL(__xray_FunctionExit): .globl ASM_SYMBOL(__xray_FunctionTailExit) .align 16, 0x90 ASM_TYPE_FUNCTION(__xray_FunctionTailExit) +# LLVM-MCA-BEGIN __xray_FunctionTailExit ASM_SYMBOL(__xray_FunctionTailExit): CFI_STARTPROC SAVE_REGISTERS @@ -170,6 +184,7 @@ ASM_SYMBOL(__xray_FunctionTailExit): .Ltmp4: RESTORE_REGISTERS retq +# LLVM-MCA-END ASM_SIZE(__xray_FunctionTailExit) CFI_ENDPROC @@ -178,6 +193,7 @@ ASM_SYMBOL(__xray_FunctionTailExit): .globl ASM_SYMBOL(__xray_ArgLoggerEntry) .align 16, 0x90 ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry) +# LLVM-MCA-BEGIN __xray_ArgLoggerEntry ASM_SYMBOL(__xray_ArgLoggerEntry): CFI_STARTPROC SAVE_REGISTERS @@ -207,6 +223,7 @@ ASM_SYMBOL(__xray_ArgLoggerEntry): .Larg1entryFail: RESTORE_REGISTERS retq +# LLVM-MCA-END ASM_SIZE(__xray_ArgLoggerEntry) CFI_ENDPROC @@ -215,13 +232,13 @@ ASM_SYMBOL(__xray_ArgLoggerEntry): .global ASM_SYMBOL(__xray_CustomEvent) .align 16, 0x90 ASM_TYPE_FUNCTION(__xray_CustomEvent) 
+# LLVM-MCA-BEGIN __xray_CustomEvent ASM_SYMBOL(__xray_CustomEvent): CFI_STARTPROC SAVE_REGISTERS // We take two arguments to this trampoline, which should be in rdi and rsi - // already. We also make sure that we stash %rax because we use that register - // to call the logging handler. + // already. movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax testq %rax,%rax je .LcustomEventCleanup @@ -231,7 +248,35 @@ ASM_SYMBOL(__xray_CustomEvent): .LcustomEventCleanup: RESTORE_REGISTERS retq +# LLVM-MCA-END ASM_SIZE(__xray_CustomEvent) CFI_ENDPROC +//===----------------------------------------------------------------------===// + + .global ASM_SYMBOL(__xray_TypedEvent) + .align 16, 0x90 + ASM_TYPE_FUNCTION(__xray_TypedEvent) +# LLVM-MCA-BEGIN __xray_TypedEvent +ASM_SYMBOL(__xray_TypedEvent): + CFI_STARTPROC + SAVE_REGISTERS + + // We pass three arguments to this trampoline, which should be in rdi, rsi + // and rdx without our intervention. + movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax + testq %rax,%rax + je .LtypedEventCleanup + + ALIGNED_CALL_RAX + +.LtypedEventCleanup: + RESTORE_REGISTERS + retq +# LLVM-MCA-END + ASM_SIZE(__xray_TypedEvent) + CFI_ENDPROC + +//===----------------------------------------------------------------------===// + NO_EXEC_STACK_DIRECTIVE diff --git a/lib/xray/xray_utils.cc b/lib/xray/xray_utils.cc index cf800d3aeaf8..68f4e8c1094c 100644 --- a/lib/xray/xray_utils.cc +++ b/lib/xray/xray_utils.cc @@ -15,11 +15,11 @@ #include "sanitizer_common/sanitizer_common.h" #include "xray_defs.h" #include "xray_flags.h" -#include <stdlib.h> #include <cstdio> #include <errno.h> #include <fcntl.h> #include <iterator> +#include <stdlib.h> #include <sys/types.h> #include <tuple> #include <unistd.h> @@ -31,7 +31,7 @@ void printToStdErr(const char *Buffer) XRAY_NEVER_INSTRUMENT { fprintf(stderr, "%s", Buffer); } -void retryingWriteAll(int Fd, char *Begin, char *End) XRAY_NEVER_INSTRUMENT { +void retryingWriteAll(int Fd, const char 
*Begin, const char *End) XRAY_NEVER_INSTRUMENT { if (Begin == End) return; auto TotalBytes = std::distance(Begin, End); @@ -82,7 +82,7 @@ bool readValueFromFile(const char *Filename, if (!Success) return false; close(Fd); - char *End = nullptr; + const char *End = nullptr; long long Tmp = internal_simple_strtoll(Line, &End, 10); bool Result = false; if (Line[0] != '\0' && (*End == '\n' || *End == '\0')) { @@ -94,10 +94,10 @@ bool readValueFromFile(const char *Filename, int getLogFD() XRAY_NEVER_INSTRUMENT { // Open a temporary file once for the log. - static char TmpFilename[256] = {}; - static char TmpWildcardPattern[] = "XXXXXX"; - auto Argv = GetArgv(); - const char *Progname = Argv[0] == nullptr ? "(unknown)" : Argv[0]; + char TmpFilename[256] = {}; + char TmpWildcardPattern[] = "XXXXXX"; + auto **Argv = GetArgv(); + const char *Progname = !Argv ? "(unknown)" : Argv[0]; const char *LastSlash = internal_strrchr(Progname, '/'); if (LastSlash != nullptr) @@ -117,7 +117,7 @@ int getLogFD() XRAY_NEVER_INSTRUMENT { TmpFilename); return -1; } - if (__sanitizer::Verbosity()) + if (Verbosity()) Report("XRay: Log file in '%s'\n", TmpFilename); return Fd; diff --git a/lib/xray/xray_utils.h b/lib/xray/xray_utils.h index 1ecc74a2dce8..eafa16e1a9d5 100644 --- a/lib/xray/xray_utils.h +++ b/lib/xray/xray_utils.h @@ -15,6 +15,8 @@ #ifndef XRAY_UTILS_H #define XRAY_UTILS_H +#include <cstddef> +#include <cstdint> #include <sys/types.h> #include <utility> @@ -24,7 +26,7 @@ namespace __xray { void printToStdErr(const char *Buffer); // EINTR-safe write routine, provided a file descriptor and a character range. -void retryingWriteAll(int Fd, char *Begin, char *End); +void retryingWriteAll(int Fd, const char *Begin, const char *End); // Reads a long long value from a provided file. bool readValueFromFile(const char *Filename, long long *Value); @@ -36,6 +38,32 @@ std::pair<ssize_t, bool> retryingReadSome(int Fd, char *Begin, char *End); // file. 
int getLogFD(); +constexpr size_t gcd(size_t a, size_t b) { + return (b == 0) ? a : gcd(b, a % b); +} + +constexpr size_t lcm(size_t a, size_t b) { return a * b / gcd(a, b); } + +constexpr size_t nearest_boundary(size_t number, size_t multiple) { + return multiple * ((number / multiple) + (number % multiple ? 1 : 0)); +} + +constexpr size_t next_pow2_helper(size_t num, size_t acc) { + return (1u << acc) >= num ? (1u << acc) : next_pow2_helper(num, acc + 1); +} + +constexpr size_t next_pow2(size_t number) { + return next_pow2_helper(number, 1); +} + +template <class T> constexpr T &max(T &A, T &B) { return A > B ? A : B; } + +template <class T> constexpr T &min(T &A, T &B) { return A <= B ? A : B; } + +constexpr ptrdiff_t diff(uintptr_t A, uintptr_t B) { + return max(A, B) - min(A, B); +} + } // namespace __xray #endif // XRAY_UTILS_H diff --git a/lib/xray/xray_x86_64.cc b/lib/xray/xray_x86_64.cc index e17f00ac3a62..51dc4ce43b1c 100644 --- a/lib/xray/xray_x86_64.cc +++ b/lib/xray/xray_x86_64.cc @@ -3,6 +3,15 @@ #include "xray_defs.h" #include "xray_interface_internal.h" +#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD +#include <sys/types.h> +#if SANITIZER_OPENBSD +#include <sys/time.h> +#include <machine/cpu.h> +#endif +#include <sys/sysctl.h> +#endif + #include <atomic> #include <cstdint> #include <errno.h> @@ -14,6 +23,7 @@ namespace __xray { +#if SANITIZER_LINUX static std::pair<ssize_t, bool> retryingReadSome(int Fd, char *Begin, char *End) XRAY_NEVER_INSTRUMENT { auto BytesToRead = std::distance(Begin, End); @@ -47,7 +57,7 @@ static bool readValueFromFile(const char *Filename, close(Fd); if (!Success) return false; - char *End = nullptr; + const char *End = nullptr; long long Tmp = internal_simple_strtoll(Line, &End, 10); bool Result = false; if (Line[0] != '\0' && (*End == '\n' || *End == '\0')) { @@ -71,6 +81,31 @@ uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT { } return TSCFrequency == -1 ? 
0 : static_cast<uint64_t>(TSCFrequency); } +#elif SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD +uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT { + long long TSCFrequency = -1; + size_t tscfreqsz = sizeof(TSCFrequency); +#if SANITIZER_OPENBSD + int Mib[2] = { CTL_MACHDEP, CPU_TSCFREQ }; + if (sysctl(Mib, 2, &TSCFrequency, &tscfreqsz, NULL, 0) != -1) { + +#else + if (sysctlbyname("machdep.tsc_freq", &TSCFrequency, &tscfreqsz, + NULL, 0) != -1) { +#endif + return static_cast<uint64_t>(TSCFrequency); + } else { + Report("Unable to determine CPU frequency for TSC accounting.\n"); + } + + return 0; +} +#else +uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT { + /* Not supported */ + return 0; +} +#endif static constexpr uint8_t CallOpCode = 0xe8; static constexpr uint16_t MovR10Seq = 0xba41; @@ -184,8 +219,8 @@ bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId, reinterpret_cast<int64_t>(__xray_FunctionTailExit) - (static_cast<int64_t>(Sled.Address) + 11); if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) { - Report("XRay Exit trampoline (%p) too far from sled (%p)\n", - __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address)); + Report("XRay Tail Exit trampoline (%p) too far from sled (%p)\n", + __xray_FunctionTailExit, reinterpret_cast<void *>(Sled.Address)); return false; } if (Enable) { @@ -251,6 +286,37 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId, return false; } +bool patchTypedEvent(const bool Enable, const uint32_t FuncId, + const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT { + // Here we do the dance of replacing the following sled: + // + // xray_sled_n: + // jmp +20 // 2 byte instruction + // ... + // + // With the following: + // + // nopw // 2 bytes + // ... + // + // + // The "unpatch" should just turn the 'nopw' back to a 'jmp +20'. + // The 20 byte sled stashes three argument registers, calls the trampoline, + // unstashes the registers and returns. 
If the arguments are already in + // the correct registers, the stashing and unstashing become equivalently + // sized nops. + if (Enable) { + std::atomic_store_explicit( + reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq, + std::memory_order_release); + } else { + std::atomic_store_explicit( + reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq, + std::memory_order_release); + } + return false; +} + // We determine whether the CPU we're running on has the correct features we // need. In x86_64 this will be rdtscp support. bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { @@ -259,7 +325,8 @@ bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { // We check whether rdtscp support is enabled. According to the x86_64 manual, // level should be set at 0x80000001, and we should have a look at bit 27 in // EDX. That's 0x8000000 (or 1u << 27). - __get_cpuid(0x80000001, &EAX, &EBX, &ECX, &EDX); + __asm__ __volatile__("cpuid" : "=a"(EAX), "=b"(EBX), "=c"(ECX), "=d"(EDX) + : "0"(0x80000001)); if (!(EDX & (1u << 27))) { Report("Missing rdtscp support.\n"); return false; diff --git a/lib/xray/xray_x86_64.inc b/lib/xray/xray_x86_64.inc index 4ad3f9810946..b3c475f9110c 100644 --- a/lib/xray/xray_x86_64.inc +++ b/lib/xray/xray_x86_64.inc @@ -21,9 +21,10 @@ namespace __xray { ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT { unsigned LongCPU; - uint64_t TSC = __rdtscp(&LongCPU); + unsigned long Rax, Rdx; + __asm__ __volatile__("rdtscp\n" : "=a"(Rax), "=d"(Rdx), "=c"(LongCPU) ::); CPU = LongCPU; - return TSC; + return (Rdx << 32) + Rax; } uint64_t getTSCFrequency(); |