Diffstat (limited to 'lib/sanitizer_common/sanitizer_fuchsia.cc')
-rw-r--r-- | lib/sanitizer_common/sanitizer_fuchsia.cc | 163
1 file changed, 69 insertions, 94 deletions
diff --git a/lib/sanitizer_common/sanitizer_fuchsia.cc b/lib/sanitizer_common/sanitizer_fuchsia.cc
index 936ec794b8e8..391620690f30 100644
--- a/lib/sanitizer_common/sanitizer_fuchsia.cc
+++ b/lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -1,16 +1,16 @@
-//===-- sanitizer_fuchsia.cc ---------------------------------------------===//
+//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
 //
 //                     The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
-//===---------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
 //
 // This file is shared between AddressSanitizer and other sanitizer
 // run-time libraries and implements Fuchsia-specific functions from
 // sanitizer_common.h.
-//===---------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
 
 #include "sanitizer_fuchsia.h"
 #if SANITIZER_FUCHSIA
@@ -18,19 +18,20 @@
 #include "sanitizer_common.h"
 #include "sanitizer_libc.h"
 #include "sanitizer_mutex.h"
-#include "sanitizer_stacktrace.h"
 
 #include <limits.h>
 #include <pthread.h>
 #include <stdlib.h>
 #include <unistd.h>
-#include <unwind.h>
 #include <zircon/errors.h>
 #include <zircon/process.h>
 #include <zircon/syscalls.h>
 
 namespace __sanitizer {
 
+// TODO(phosek): remove this and replace it with ZX_TIME_INFINITE
+#define ZX_TIME_INFINITE_OLD INT64_MAX
+
 void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }
 
 uptr internal_sched_yield() {
@@ -49,9 +50,9 @@ unsigned int internal_sleep(unsigned int seconds) {
   return 0;
 }
 
-u64 NanoTime() { return _zx_time_get(ZX_CLOCK_UTC); }
+u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); }
 
-u64 MonotonicNanoTime() { return _zx_time_get(ZX_CLOCK_MONOTONIC); }
+u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }
 
 uptr internal_getpid() {
   zx_info_handle_basic_t info;
@@ -66,7 +67,7 @@ uptr internal_getpid() {
 
 uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
 
-uptr GetTid() { return GetThreadSelf(); }
+tid_t GetTid() { return GetThreadSelf(); }
 
 void Abort() { abort(); }
 
@@ -89,13 +90,10 @@ void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
 }
 
 void MaybeReexec() {}
-void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void CheckASLR() {}
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
 void DisableCoreDumperIfNecessary() {}
 void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
-void StartReportDeadlySignal() {}
-void ReportDeadlySignal(const SignalContext &sig, u32 tid,
-                        UnwindSignalStackCallbackType unwind,
-                        const void *unwind_context) {}
 void SetAlternateSignalStack() {}
 void UnsetAlternateSignalStack() {}
 void InitTlsSize() {}
@@ -106,42 +104,6 @@ bool SignalContext::IsStackOverflow() const { return false; }
 void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
 const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
 
-struct UnwindTraceArg {
-  BufferedStackTrace *stack;
-  u32 max_depth;
-};
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
-  UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
-  CHECK_LT(arg->stack->size, arg->max_depth);
-  uptr pc = _Unwind_GetIP(ctx);
-  if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
-  arg->stack->trace_buffer[arg->stack->size++] = pc;
-  return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
-                                             : _URC_NO_REASON);
-}
-
-void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
-  CHECK_GE(max_depth, 2);
-  size = 0;
-  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
-  _Unwind_Backtrace(Unwind_Trace, &arg);
-  CHECK_GT(size, 0);
-  // We need to pop a few frames so that pc is on top.
-  uptr to_pop = LocatePcInTrace(pc);
-  // trace_buffer[0] belongs to the current function so we always pop it,
-  // unless there is only 1 frame in the stack trace (1 frame is always better
-  // than 0!).
-  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
-  trace_buffer[0] = pc;
-}
-
-void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
-                                                    u32 max_depth) {
-  CHECK_NE(context, nullptr);
-  UNREACHABLE("signal context doesn't exist");
-}
-
 enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
 
 BlockingMutex::BlockingMutex() {
@@ -161,7 +123,7 @@ void BlockingMutex::Lock() {
     return;
   while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
     zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
-                                        MtxSleeping, ZX_TIME_INFINITE);
+                                        MtxSleeping, ZX_TIME_INFINITE_OLD);
     if (status != ZX_ERR_BAD_STATE)  // Normal race.
       CHECK_EQ(status, ZX_OK);
   }
@@ -212,8 +174,9 @@ static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
 
   // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
   uintptr_t addr;
-  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
-                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+  status =
+      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, size,
+                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
   _zx_handle_close(vmo);
 
   if (status != ZX_OK) {
@@ -247,10 +210,10 @@ uptr ReservedAddressRange::Init(uptr init_size, const char *name,
   uintptr_t base;
   zx_handle_t vmar;
   zx_status_t status =
-      _zx_vmar_allocate(_zx_vmar_root_self(), 0, init_size,
-                        ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
-                            ZX_VM_FLAG_CAN_MAP_SPECIFIC,
-                        &vmar, &base);
+      _zx_vmar_allocate_old(_zx_vmar_root_self(), 0, init_size,
+                            ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
+                                ZX_VM_FLAG_CAN_MAP_SPECIFIC,
+                            &vmar, &base);
   if (status != ZX_OK)
     ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
   base_ = reinterpret_cast<void *>(base);
@@ -272,11 +235,11 @@ static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
     ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
     return 0;
   }
-  _zx_object_set_property(vmo, ZX_PROP_NAME, name, sizeof(name) - 1);
+  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
   DCHECK_GE(base + size_, map_size + offset);
   uintptr_t addr;
 
-  status = _zx_vmar_map(
+  status = _zx_vmar_map_old(
       vmar, offset, vmo, 0, map_size,
       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
       &addr);
@@ -316,20 +279,16 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
   DecreaseTotalMmap(size);
 }
 
-void ReservedAddressRange::Unmap(uptr fixed_addr, uptr size) {
-  uptr offset = fixed_addr - reinterpret_cast<uptr>(base_);
-  uptr addr = reinterpret_cast<uptr>(base_) + offset;
-  void *addr_as_void = reinterpret_cast<void *>(addr);
-  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
-  // Only unmap at the beginning or end of the range.
-  CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
   CHECK_LE(size, size_);
+  if (addr == reinterpret_cast<uptr>(base_))
+    // If we unmap the whole range, just null out the base.
+    base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size);
+  else
+    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
+  size_ -= size;
   UnmapOrDieVmar(reinterpret_cast<void *>(addr), size,
                  static_cast<zx_handle_t>(os_handle_));
-  if (addr_as_void == base_) {
-    base_ = reinterpret_cast<void *>(addr + size);
-  }
-  size_ = size_ - size;
 }
 
 // This should never be called.
@@ -361,8 +320,9 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
   // beginning of the VMO, and unmap the excess before and after.
   size_t map_size = size + alignment;
   uintptr_t addr;
-  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
-                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+  status =
+      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
+                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
   if (status == ZX_OK) {
     uintptr_t map_addr = addr;
     uintptr_t map_end = map_addr + map_size;
@@ -374,11 +334,11 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                    sizeof(info), NULL, NULL);
       if (status == ZX_OK) {
         uintptr_t new_addr;
-        status =
-            _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
-                         ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
-                             ZX_VM_FLAG_SPECIFIC_OVERWRITE,
-                         &new_addr);
+        status = _zx_vmar_map_old(_zx_vmar_root_self(), addr - info.base, vmo,
+                                  0, size,
+                                  ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+                                      ZX_VM_FLAG_SPECIFIC_OVERWRITE,
+                                  &new_addr);
         if (status == ZX_OK) CHECK_EQ(new_addr, addr);
       }
     }
@@ -418,16 +378,7 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
   zx_handle_t vmo;
   zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
-    while (size > 0) {
-      size_t wrote;
-      status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size,
-                             &wrote);
-      if (status != ZX_OK) break;
-      CHECK_GT(wrote, 0);
-      CHECK_LE(wrote, size);
-      beg += wrote;
-      size -= wrote;
-    }
+    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
     _zx_handle_close(vmo);
   }
   return status == ZX_OK;
@@ -447,8 +398,8 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
   if (vmo_size < max_len) max_len = vmo_size;
   size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
   uintptr_t addr;
-  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
-                        ZX_VM_FLAG_PERM_READ, &addr);
+  status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
+                            ZX_VM_FLAG_PERM_READ, &addr);
   if (status == ZX_OK) {
     *buff = reinterpret_cast<char *>(addr);
     *buff_size = map_size;
@@ -462,7 +413,31 @@
 }
 
 void RawWrite(const char *buffer) {
-  __sanitizer_log_write(buffer, internal_strlen(buffer));
+  constexpr size_t size = 128;
+  static _Thread_local char line[size];
+  static _Thread_local size_t lastLineEnd = 0;
+  static _Thread_local size_t cur = 0;
+
+  while (*buffer) {
+    if (cur >= size) {
+      if (lastLineEnd == 0)
+        lastLineEnd = size;
+      __sanitizer_log_write(line, lastLineEnd);
+      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
+      cur = cur - lastLineEnd;
+      lastLineEnd = 0;
+    }
+    if (*buffer == '\n')
+      lastLineEnd = cur + 1;
+    line[cur++] = *buffer++;
+  }
+  // Flush all complete lines before returning.
+  if (lastLineEnd != 0) {
+    __sanitizer_log_write(line, lastLineEnd);
+    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
+    cur = cur - lastLineEnd;
+    lastLineEnd = 0;
+  }
 }
 
 void CatastrophicErrorWrite(const char *buffer, uptr length) {
@@ -486,8 +461,10 @@ const char *GetEnv(const char *name) {
 }
 
 uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
-  const char *argv0 = StoredArgv[0];
-  if (!argv0) argv0 = "<UNKNOWN>";
+  const char *argv0 = "<UNKNOWN>";
+  if (StoredArgv && StoredArgv[0]) {
+    argv0 = StoredArgv[0];
+  }
   internal_strncpy(buf, argv0, buf_len);
   return internal_strlen(buf);
 }
@@ -500,9 +477,7 @@ uptr MainThreadStackBase, MainThreadStackSize;
 
 bool GetRandom(void *buffer, uptr length, bool blocking) {
   CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
-  size_t size;
-  CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK);
-  CHECK_EQ(size, length);
+  _zx_cprng_draw(buffer, length);
   return true;
 }
 
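Note on the largest addition above: the new RawWrite replaces a direct __sanitizer_log_write call with a 128-byte thread-local line buffer, so the underlying log receives whole lines even when callers emit output a fragment at a time. What follows is a minimal standalone sketch of that buffering scheme, not the sanitizer code itself: log_write is a hypothetical stand-in for __sanitizer_log_write, and standard memmove replaces internal_memmove; the control flow otherwise mirrors the diff.

// Sketch of line-buffered logging: bytes accumulate in a fixed
// thread-local buffer, and only complete lines (or a full buffer)
// are handed to the underlying sink.
#include <cstddef>
#include <cstdio>
#include <cstring>

static void log_write(const char *buf, size_t len) {
  fwrite(buf, 1, len, stderr);  // stand-in sink for __sanitizer_log_write
}

static void raw_write(const char *buffer) {
  constexpr size_t kSize = 128;
  static thread_local char line[kSize];
  static thread_local size_t last_line_end = 0;  // one past the last buffered '\n'
  static thread_local size_t cur = 0;            // bytes currently buffered

  while (*buffer) {
    if (cur >= kSize) {
      // Buffer full: flush up to the last newline (or the whole buffer if
      // none was seen), then shift the remainder down to the front.
      if (last_line_end == 0) last_line_end = kSize;
      log_write(line, last_line_end);
      memmove(line, line + last_line_end, cur - last_line_end);
      cur -= last_line_end;
      last_line_end = 0;
    }
    if (*buffer == '\n') last_line_end = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush complete lines now; a trailing partial line stays buffered
  // until a later call completes it.
  if (last_line_end != 0) {
    log_write(line, last_line_end);
    memmove(line, line + last_line_end, cur - last_line_end);
    cur -= last_line_end;
    last_line_end = 0;
  }
}

int main() {
  raw_write("one message built ");
  raw_write("from two calls\n");  // reaches the sink as a single line
  return 0;
}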