Diffstat (limited to 'lib/xray/xray_interface.cc')
-rw-r--r--  lib/xray/xray_interface.cc  131
1 file changed, 89 insertions, 42 deletions
diff --git a/lib/xray/xray_interface.cc b/lib/xray/xray_interface.cc
index 766313e85c584..01bf6ddc607e9 100644
--- a/lib/xray/xray_interface.cc
+++ b/lib/xray/xray_interface.cc
@@ -19,9 +19,12 @@
 #include <cstdio>
 #include <errno.h>
 #include <limits>
+#include <string.h>
 #include <sys/mman.h>
 
+#include "sanitizer_common/sanitizer_addrhashmap.h"
 #include "sanitizer_common/sanitizer_common.h"
+
 #include "xray_defs.h"
 #include "xray_flags.h"
 
@@ -48,26 +51,40 @@ static const int16_t cSledLength = 8;
 #endif /* CPU architecture */
 
 // This is the function to call when we encounter the entry or exit sleds.
-__sanitizer::atomic_uintptr_t XRayPatchedFunction{0};
+atomic_uintptr_t XRayPatchedFunction{0};
 
 // This is the function to call from the arg1-enabled sleds/trampolines.
-__sanitizer::atomic_uintptr_t XRayArgLogger{0};
+atomic_uintptr_t XRayArgLogger{0};
 
 // This is the function to call when we encounter a custom event log call.
-__sanitizer::atomic_uintptr_t XRayPatchedCustomEvent{0};
+atomic_uintptr_t XRayPatchedCustomEvent{0};
+
+// This is the function to call when we encounter a typed event log call.
+atomic_uintptr_t XRayPatchedTypedEvent{0};
 
 // This is the global status to determine whether we are currently
 // patching/unpatching.
-__sanitizer::atomic_uint8_t XRayPatching{0};
+atomic_uint8_t XRayPatching{0};
+
+struct TypeDescription {
+  uint32_t type_id;
+  std::size_t description_string_length;
+};
+
+using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
+// An address map from immutable descriptors to type ids.
+TypeDescriptorMapType TypeDescriptorAddressMap{};
 
-// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will undo
-// any successful mprotect(...) changes. This is used to make a page writeable
-// and executable, and upon destruction if it was successful in doing so returns
-// the page into a read-only and executable page.
+atomic_uint32_t TypeEventDescriptorCounter{0};
+
+// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will
+// undo any successful mprotect(...) changes. This is used to make a page
+// writeable and executable, and upon destruction if it was successful in
+// doing so returns the page into a read-only and executable page.
 //
 // This is only used specifically for runtime-patching of the XRay
-// instrumentation points. This assumes that the executable pages are originally
-// read-and-execute only.
+// instrumentation points. This assumes that the executable pages are
+// originally read-and-execute only.
 class MProtectHelper {
   void *PageAlignedAddr;
   std::size_t MProtectLen;
@@ -116,6 +133,9 @@ bool patchSled(const XRaySledEntry &Sled, bool Enable,
   case XRayEntryType::CUSTOM_EVENT:
     Success = patchCustomEvent(Enable, FuncId, Sled);
     break;
+  case XRayEntryType::TYPED_EVENT:
+    Success = patchTypedEvent(Enable, FuncId, Sled);
+    break;
   default:
     Report("Unsupported sled kind '%d' @%04x\n", Sled.Address, int(Sled.Kind));
     return false;
@@ -125,19 +145,19 @@
 
 XRayPatchingStatus patchFunction(int32_t FuncId,
                                  bool Enable) XRAY_NEVER_INSTRUMENT {
-  if (!__sanitizer::atomic_load(&XRayInitialized,
-                                __sanitizer::memory_order_acquire))
+  if (!atomic_load(&XRayInitialized,
+                                memory_order_acquire))
     return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
 
   uint8_t NotPatching = false;
-  if (!__sanitizer::atomic_compare_exchange_strong(
-          &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
+  if (!atomic_compare_exchange_strong(
+          &XRayPatching, &NotPatching, true, memory_order_acq_rel))
     return XRayPatchingStatus::ONGOING; // Already patching.
 
   // Next, we look for the function index.
   XRaySledMap InstrMap;
   {
-    __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+    SpinMutexLock Guard(&XRayInstrMapMutex);
     InstrMap = XRayInstrMap;
   }
 
@@ -161,8 +181,8 @@ XRayPatchingStatus patchFunction(int32_t FuncId,
   while (f != e)
     SucceedOnce |= patchSled(*f++, Enable, FuncId);
 
-  __sanitizer::atomic_store(&XRayPatching, false,
-                            __sanitizer::memory_order_release);
+  atomic_store(&XRayPatching, false,
+                            memory_order_release);
 
   if (!SucceedOnce) {
     Report("Failed patching any sled for function '%d'.", FuncId);
@@ -176,26 +196,26 @@ XRayPatchingStatus patchFunction(int32_t FuncId,
 // implementation. |Enable| defines whether we're enabling or disabling the
 // runtime XRay instrumentation.
 XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
-  if (!__sanitizer::atomic_load(&XRayInitialized,
-                                __sanitizer::memory_order_acquire))
+  if (!atomic_load(&XRayInitialized,
+                                memory_order_acquire))
     return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
 
   uint8_t NotPatching = false;
-  if (!__sanitizer::atomic_compare_exchange_strong(
-          &XRayPatching, &NotPatching, true, __sanitizer::memory_order_acq_rel))
+  if (!atomic_compare_exchange_strong(
+          &XRayPatching, &NotPatching, true, memory_order_acq_rel))
     return XRayPatchingStatus::ONGOING; // Already patching.
 
   uint8_t PatchingSuccess = false;
   auto XRayPatchingStatusResetter =
-      __sanitizer::at_scope_exit([&PatchingSuccess] {
+      at_scope_exit([&PatchingSuccess] {
         if (!PatchingSuccess)
-          __sanitizer::atomic_store(&XRayPatching, false,
-                                    __sanitizer::memory_order_release);
+          atomic_store(&XRayPatching, false,
+                                    memory_order_release);
       });
 
   XRaySledMap InstrMap;
   {
-    __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+    SpinMutexLock Guard(&XRayInstrMapMutex);
     InstrMap = XRayInstrMap;
   }
   if (InstrMap.Entries == 0)
@@ -251,8 +271,8 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
     }
     patchSled(Sled, Enable, FuncId);
   }
-  __sanitizer::atomic_store(&XRayPatching, false,
-                            __sanitizer::memory_order_release);
+  atomic_store(&XRayPatching, false,
+                            memory_order_release);
   PatchingSuccess = true;
   return XRayPatchingStatus::SUCCESS;
 }
@@ -261,7 +281,7 @@ XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
                                             bool Enable) XRAY_NEVER_INSTRUMENT {
   XRaySledMap InstrMap;
   {
-    __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+    SpinMutexLock Guard(&XRayInstrMapMutex);
     InstrMap = XRayInstrMap;
   }
 
@@ -318,12 +338,12 @@ using namespace __xray;
 
 int __xray_set_handler(void (*entry)(int32_t,
                                      XRayEntryType)) XRAY_NEVER_INSTRUMENT {
-  if (__sanitizer::atomic_load(&XRayInitialized,
-                               __sanitizer::memory_order_acquire)) {
+  if (atomic_load(&XRayInitialized,
+                               memory_order_acquire)) {
 
-    __sanitizer::atomic_store(&__xray::XRayPatchedFunction,
+    atomic_store(&__xray::XRayPatchedFunction,
                               reinterpret_cast<uintptr_t>(entry),
-                              __sanitizer::memory_order_release);
+                              memory_order_release);
     return 1;
   }
   return 0;
@@ -331,11 +351,23 @@ int __xray_set_handler(void (*entry)(int32_t,
 
 int __xray_set_customevent_handler(void (*entry)(void *, size_t))
     XRAY_NEVER_INSTRUMENT {
-  if (__sanitizer::atomic_load(&XRayInitialized,
-                               __sanitizer::memory_order_acquire)) {
-    __sanitizer::atomic_store(&__xray::XRayPatchedCustomEvent,
+  if (atomic_load(&XRayInitialized,
+                               memory_order_acquire)) {
+    atomic_store(&__xray::XRayPatchedCustomEvent,
+                              reinterpret_cast<uintptr_t>(entry),
+                              memory_order_release);
+    return 1;
+  }
+  return 0;
+}
+
+int __xray_set_typedevent_handler(void (*entry)(
+    uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
+  if (atomic_load(&XRayInitialized,
+                               memory_order_acquire)) {
+    atomic_store(&__xray::XRayPatchedTypedEvent,
                               reinterpret_cast<uintptr_t>(entry),
-                              __sanitizer::memory_order_release);
+                              memory_order_release);
     return 1;
   }
   return 0;
@@ -349,6 +381,21 @@ int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
   return __xray_set_customevent_handler(nullptr);
 }
 
+int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT {
+  return __xray_set_typedevent_handler(nullptr);
+}
+
+uint16_t __xray_register_event_type(
+    const char *const event_type) XRAY_NEVER_INSTRUMENT {
+  TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
+  if (h.created()) {
+    h->type_id = atomic_fetch_add(
+        &TypeEventDescriptorCounter, 1, memory_order_acq_rel);
+    h->description_string_length = strnlen(event_type, 1024);
+  }
+  return h->type_id;
+}
+
 XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
   return controlPatching(true);
 }
@@ -367,22 +414,22 @@ __xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
 }
 
 int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
-  if (!__sanitizer::atomic_load(&XRayInitialized,
-                                __sanitizer::memory_order_acquire))
+  if (!atomic_load(&XRayInitialized,
+                                memory_order_acquire))
     return 0;
 
   // A relaxed write might not be visible even if the current thread gets
   // scheduled on a different CPU/NUMA node.  We need to wait for everyone to
   // have this handler installed for consistency of collected data across CPUs.
-  __sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
-                            __sanitizer::memory_order_release);
+  atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
+                            memory_order_release);
   return 1;
 }
 
 int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
 
 uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
-  __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+  SpinMutexLock Guard(&XRayInstrMapMutex);
   if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
     return 0;
   return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function
@@ -396,6 +443,6 @@ uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
 }
 
 size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
-  __sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
+  SpinMutexLock Guard(&XRayInstrMapMutex);
   return XRayInstrMap.Functions;
 }
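Note on the rewrapped comment in the second hunk: it describes MProtectHelper as an RAII guard around mprotect(2) that makes a page range writeable and executable, and on destruction restores read/execute permissions only if the earlier change succeeded. The class body is largely outside this diff, so the following is a stand-alone sketch of that pattern rather than the actual implementation; the class and method names here are illustrative only.

#include <cstddef>
#include <sys/mman.h>

// Illustrative RAII guard in the spirit of MProtectHelper: MakeWriteable()
// flips the page-aligned range to read/write/execute, and the destructor
// restores read/execute permissions only when that call succeeded.
class ScopedMProtect {
  void *PageAlignedAddr;
  std::size_t MProtectLen;
  bool MustCleanup = false;

public:
  ScopedMProtect(void *Addr, std::size_t Len)
      : PageAlignedAddr(Addr), MProtectLen(Len) {}

  int MakeWriteable() {
    const int Ret = mprotect(PageAlignedAddr, MProtectLen,
                             PROT_READ | PROT_WRITE | PROT_EXEC);
    if (Ret == 0)
      MustCleanup = true;
    return Ret;
  }

  ~ScopedMProtect() {
    if (MustCleanup)
      mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
  }
};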
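The patch adds three public entry points for typed events: __xray_set_typedevent_handler, __xray_remove_typedevent_handler, and __xray_register_event_type. The sketch below shows how they could be wired together, assuming these declarations are exposed through compiler-rt's public xray/xray_interface.h header; the handler, descriptor string, and main() scaffolding are illustrative and not part of this change.

#include <cstddef>
#include <cstdint>
#include <cstdio>

#include "xray/xray_interface.h"  // assumed install location of the public XRay API

// Handler matching the signature expected by __xray_set_typedevent_handler:
// it receives the type id assigned by __xray_register_event_type, a pointer
// to the event payload, and the payload size.
static void MyTypedEventHandler(uint16_t EventType, const void *Payload,
                                size_t PayloadSize) {
  std::fprintf(stderr, "typed event: type=%u size=%zu\n",
               static_cast<unsigned>(EventType), PayloadSize);
  (void)Payload;
}

int main() {
  // Descriptors are keyed by address in TypeDescriptorAddressMap, so passing
  // the same descriptor object again returns the same type id.
  static const char kDescriptor[] = "example.request_latency";  // hypothetical name
  const uint16_t TypeId = __xray_register_event_type(kDescriptor);
  std::printf("registered type id %u\n", static_cast<unsigned>(TypeId));

  // Install the handler and patch the sleds; typed-event sleds then call
  // through XRayPatchedTypedEvent into MyTypedEventHandler.
  __xray_set_typedevent_handler(MyTypedEventHandler);
  __xray_patch();

  // ... run instrumented code that emits typed events ...

  __xray_unpatch();
  __xray_remove_typedevent_handler();
  return 0;
}

Because the registry is keyed on the descriptor's address rather than its contents, callers are expected to pass a stable, immutable string, matching the "immutable descriptors" wording in the comment above.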
