summaryrefslogtreecommitdiff
path: root/lib/interception
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2016-07-23 20:45:36 +0000
committerDimitry Andric <dim@FreeBSD.org>2016-07-23 20:45:36 +0000
commit6f08730ec5f639f05f2f15354171e4a3c9af9dc1 (patch)
tree7374e9d4448083010ada98d17976199c7e945d47 /lib/interception
parentc003a57e2e4a1ad9be0338806bc1038b6987155f (diff)
Notes
Diffstat (limited to 'lib/interception')
-rw-r--r--lib/interception/CMakeLists.txt6
-rw-r--r--lib/interception/Makefile.mk23
-rw-r--r--lib/interception/interception_win.cc891
-rw-r--r--lib/interception/interception_win.h30
-rw-r--r--lib/interception/tests/CMakeLists.txt142
-rw-r--r--lib/interception/tests/interception_linux_test.cc65
-rw-r--r--lib/interception/tests/interception_test_main.cc22
-rw-r--r--lib/interception/tests/interception_win_test.cc592
8 files changed, 1621 insertions, 150 deletions
diff --git a/lib/interception/CMakeLists.txt b/lib/interception/CMakeLists.txt
index 16b41c976d6bc..18d25948105d7 100644
--- a/lib/interception/CMakeLists.txt
+++ b/lib/interception/CMakeLists.txt
@@ -10,10 +10,14 @@ set(INTERCEPTION_SOURCES
include_directories(..)
set(INTERCEPTION_CFLAGS ${SANITIZER_COMMON_CFLAGS})
-append_no_rtti_flag(INTERCEPTION_CFLAGS)
+append_rtti_flag(OFF INTERCEPTION_CFLAGS)
add_compiler_rt_object_libraries(RTInterception
OS ${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${SANITIZER_COMMON_SUPPORTED_ARCH}
SOURCES ${INTERCEPTION_SOURCES}
CFLAGS ${INTERCEPTION_CFLAGS})
+
+if(COMPILER_RT_INCLUDE_TESTS)
+ add_subdirectory(tests)
+endif()
diff --git a/lib/interception/Makefile.mk b/lib/interception/Makefile.mk
deleted file mode 100644
index 88aa6cbc26d1f..0000000000000
--- a/lib/interception/Makefile.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-#===- lib/interception/Makefile.mk -------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := interception
-SubDirs :=
-
-Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
-ObjNames := $(Sources:%.cc=%.o)
-
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
-Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h)
-
-# Define a convenience variable for all the interception functions.
-InterceptionFunctions := $(Sources:%.cc=%)
diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc
index 4c04c83b982b6..8977d59ac4f17 100644
--- a/lib/interception/interception_win.cc
+++ b/lib/interception/interception_win.cc
@@ -10,16 +10,160 @@
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Windows-specific interception methods.
+//
+// This file is implementing several hooking techniques to intercept calls
+// to functions. The hooks are dynamically installed by modifying the assembly
+// code.
+//
+// The hooking techniques are making assumptions on the way the code is
+// generated and are safe under these assumptions.
+//
+// On 64-bit architecture, there is no direct 64-bit jump instruction. To allow
+// arbitrary branching on the whole memory space, the notion of trampoline
+// region is used. A trampoline region is a memory space within a 2G boundary
+// where it is safe to add custom assembly code to build 64-bit jumps.
+//
+// Hooking techniques
+// ==================
+//
+// 1) Detour
+//
+//    The Detour hooking technique is assuming the presence of a header with
+// padding and an overridable 2-bytes nop instruction (mov edi, edi). The
+// nop instruction can safely be replaced by a 2-bytes jump without any need
+// to save the instruction. A jump to the target is encoded in the function
+// header and the nop instruction is replaced by a short jump to the header.
+//
+// head: 5 x nop head: jmp <hook>
+// func: mov edi, edi --> func: jmp short <head>
+// [...] real: [...]
+//
+// This technique is only implemented on 32-bit architecture.
+// Most of the time, Windows API are hookable with the detour technique.
+//
+// 2) Redirect Jump
+//
+// The redirect jump is applicable when the first instruction is a direct
+// jump. The instruction is replaced by jump to the hook.
+//
+// func: jmp <label> --> func: jmp <hook>
+//
+//    On a 64-bit architecture, a trampoline is inserted.
+//
+// func: jmp <label> --> func: jmp <tramp>
+// [...]
+//
+// [trampoline]
+// tramp: jmp QWORD [addr]
+// addr: .bytes <hook>
+//
+//    Note: <real> is equivalent to <label>.
+//
+// 3) HotPatch
+//
+//    The HotPatch hooking is assuming the presence of a header with padding
+// and a first instruction with at least 2-bytes.
+//
+// The reason to enforce the 2-bytes limitation is to provide the minimal
+// space to encode a short jump. HotPatch technique is only rewriting one
+// instruction to avoid breaking a sequence of instructions containing a
+// branching target.
+//
+// Assumptions are enforced by MSVC compiler by using the /HOTPATCH flag.
+// see: https://msdn.microsoft.com/en-us/library/ms173507.aspx
+// Default padding length is 5 bytes in 32-bits and 6 bytes in 64-bits.
+//
+// head: 5 x nop head: jmp <hook>
+// func: <instr> --> func: jmp short <head>
+// [...] body: [...]
+//
+// [trampoline]
+// real: <instr>
+// jmp <body>
+//
+//    On a 64-bit architecture:
+//
+// head: 6 x nop head: jmp QWORD [addr1]
+// func: <instr> --> func: jmp short <head>
+// [...] body: [...]
+//
+// [trampoline]
+// addr1: .bytes <hook>
+// real: <instr>
+// jmp QWORD [addr2]
+// addr2: .bytes <body>
+//
+// 4) Trampoline
+//
+// The Trampoline hooking technique is the most aggressive one. It is
+// assuming that there is a sequence of instructions that can be safely
+// replaced by a jump (enough room and no incoming branches).
+//
+// Unfortunately, these assumptions can't be safely presumed and code may
+// be broken after hooking.
+//
+// func: <instr> --> func: jmp <hook>
+// <instr>
+// [...] body: [...]
+//
+// [trampoline]
+// real: <instr>
+// <instr>
+// jmp <body>
+//
+//    On a 64-bit architecture:
+//
+// func: <instr> --> func: jmp QWORD [addr1]
+// <instr>
+// [...] body: [...]
+//
+// [trampoline]
+// addr1: .bytes <hook>
+// real: <instr>
+// <instr>
+// jmp QWORD [addr2]
+// addr2: .bytes <body>
//===----------------------------------------------------------------------===//
#ifdef _WIN32
#include "interception.h"
+#include "sanitizer_common/sanitizer_platform.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
namespace __interception {
+static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
+static const int kJumpInstructionLength = 5;
+static const int kShortJumpInstructionLength = 2;
+static const int kIndirectJumpInstructionLength = 6;
+static const int kBranchLength =
+ FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
+static const int kDirectBranchLength = kBranchLength + kAddressLength;
+
+static void InterceptionFailed() {
+ // Do we have a good way to abort with an error message here?
+ __debugbreak();
+}
+
+static bool DistanceIsWithin2Gig(uptr from, uptr target) {
+ if (from < target)
+ return target - from <= (uptr)0x7FFFFFFFU;
+ else
+ return from - target <= (uptr)0x80000000U;
+}
+
+static uptr GetMmapGranularity() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwAllocationGranularity;
+}
+
+static uptr RoundUpTo(uptr size, uptr boundary) {
+ return (size + boundary - 1) & ~(boundary - 1);
+}
+
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
@@ -35,163 +179,593 @@ static void _memcpy(void *dst, void *src, size_t sz) {
dst_c[i] = src_c[i];
}
-static void WriteJumpInstruction(char *jmp_from, char *to) {
- // jmp XXYYZZWW = E9 WW ZZ YY XX, where XXYYZZWW is an offset fromt jmp_from
- // to the next instruction to the destination.
- ptrdiff_t offset = to - jmp_from - 5;
- *jmp_from = '\xE9';
- *(ptrdiff_t*)(jmp_from + 1) = offset;
-}
-
-static char *GetMemoryForTrampoline(size_t size) {
- // Trampolines are allocated from a common pool.
- const int POOL_SIZE = 1024;
- static char *pool = NULL;
- static size_t pool_used = 0;
- if (!pool) {
- pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
- // FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
- // interceptors are in place.
- if (!pool)
- return NULL;
- _memset(pool, 0xCC /* int 3 */, POOL_SIZE);
+static bool ChangeMemoryProtection(
+ uptr address, uptr size, DWORD *old_protection) {
+ return ::VirtualProtect((void*)address, size,
+ PAGE_EXECUTE_READWRITE,
+ old_protection) != FALSE;
+}
+
+static bool RestoreMemoryProtection(
+ uptr address, uptr size, DWORD old_protection) {
+ DWORD unused;
+ return ::VirtualProtect((void*)address, size,
+ old_protection,
+ &unused) != FALSE;
+}
+
+static bool IsMemoryPadding(uptr address, uptr size) {
+ u8* function = (u8*)address;
+ for (size_t i = 0; i < size; ++i)
+ if (function[i] != 0x90 && function[i] != 0xCC)
+ return false;
+ return true;
+}
+
+static const u8 kHintNop10Bytes[] = {
+ 0x66, 0x66, 0x0F, 0x1F, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+template<class T>
+static bool FunctionHasPrefix(uptr address, const T &pattern) {
+ u8* function = (u8*)address - sizeof(pattern);
+ for (size_t i = 0; i < sizeof(pattern); ++i)
+ if (function[i] != pattern[i])
+ return false;
+ return true;
+}
+
+static bool FunctionHasPadding(uptr address, uptr size) {
+ if (IsMemoryPadding(address - size, size))
+ return true;
+ if (size <= sizeof(kHintNop10Bytes) &&
+ FunctionHasPrefix(address, kHintNop10Bytes))
+ return true;
+ return false;
+}
+
+static void WritePadding(uptr from, uptr size) {
+ _memset((void*)from, 0xCC, (size_t)size);
+}
+
+static void CopyInstructions(uptr from, uptr to, uptr size) {
+ _memcpy((void*)from, (void*)to, (size_t)size);
+}
+
+static void WriteJumpInstruction(uptr from, uptr target) {
+ if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
+ InterceptionFailed();
+ ptrdiff_t offset = target - from - kJumpInstructionLength;
+ *(u8*)from = 0xE9;
+ *(u32*)(from + 1) = offset;
+}
+
+static void WriteShortJumpInstruction(uptr from, uptr target) {
+ sptr offset = target - from - kShortJumpInstructionLength;
+ if (offset < -128 || offset > 127)
+ InterceptionFailed();
+ *(u8*)from = 0xEB;
+ *(u8*)(from + 1) = (u8)offset;
+}
+
+#if SANITIZER_WINDOWS64
+static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
+ // jmp [rip + <offset>] = FF 25 <offset> where <offset> is a relative
+ // offset.
+ // The offset is the distance from then end of the jump instruction to the
+ // memory location containing the targeted address. The displacement is still
+ // 32-bit in x64, so indirect_target must be located within +/- 2GB range.
+ int offset = indirect_target - from - kIndirectJumpInstructionLength;
+ if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
+ indirect_target)) {
+ InterceptionFailed();
}
+ *(u16*)from = 0x25FF;
+ *(u32*)(from + 2) = offset;
+}
+#endif
- if (pool_used + size > POOL_SIZE)
- return NULL;
+static void WriteBranch(
+ uptr from, uptr indirect_target, uptr target) {
+#if SANITIZER_WINDOWS64
+ WriteIndirectJumpInstruction(from, indirect_target);
+ *(u64*)indirect_target = target;
+#else
+ (void)indirect_target;
+ WriteJumpInstruction(from, target);
+#endif
+}
- char *ret = pool + pool_used;
- pool_used += size;
- return ret;
+static void WriteDirectBranch(uptr from, uptr target) {
+#if SANITIZER_WINDOWS64
+ // Emit an indirect jump through immediately following bytes:
+ // jmp [rip + kBranchLength]
+ // .quad <target>
+ WriteBranch(from, from + kBranchLength, target);
+#else
+ WriteJumpInstruction(from, target);
+#endif
}
-// Returns 0 on error.
-static size_t RoundUpToInstrBoundary(size_t size, char *code) {
- size_t cursor = 0;
- while (cursor < size) {
- switch (code[cursor]) {
- case '\x51': // push ecx
- case '\x52': // push edx
- case '\x53': // push ebx
- case '\x54': // push esp
- case '\x55': // push ebp
- case '\x56': // push esi
- case '\x57': // push edi
- case '\x5D': // pop ebp
- cursor++;
- continue;
- case '\x6A': // 6A XX = push XX
- cursor += 2;
- continue;
- case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
- case '\xB8': // B8 XX YY ZZ WW = mov eax, WWZZYYXX
- cursor += 5;
- continue;
- }
- switch (*(unsigned short*)(code + cursor)) { // NOLINT
- case 0xFF8B: // 8B FF = mov edi, edi
- case 0xEC8B: // 8B EC = mov ebp, esp
- case 0xC033: // 33 C0 = xor eax, eax
- cursor += 2;
- continue;
- case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
- case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
- case 0xEC83: // 83 EC XX = sub esp, XX
- case 0x75FF: // FF 75 XX = push dword ptr [ebp+XXh]
- cursor += 3;
- continue;
- case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
- case 0x25FF: // FF 25 XX YY ZZ WW = jmp dword ptr ds:[WWZZYYXX]
- cursor += 6;
- continue;
- case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp TT, WWZZYYXX
- cursor += 7;
- continue;
+struct TrampolineMemoryRegion {
+ uptr content;
+ uptr allocated_size;
+ uptr max_size;
+};
+
+static const uptr kTrampolineScanLimitRange = 1 << 30; // 1 gig
+static const int kMaxTrampolineRegion = 1024;
+static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
+
+static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {
+#if SANITIZER_WINDOWS64
+ uptr address = image_address;
+ uptr scanned = 0;
+ while (scanned < kTrampolineScanLimitRange) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return nullptr;
+
+ // Check whether a region can be allocated at |address|.
+ if (info.State == MEM_FREE && info.RegionSize >= granularity) {
+ void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),
+ granularity,
+ MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ return page;
}
- switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
- case 0x24448A: // 8A 44 24 XX = mov eal, dword ptr [esp+XXh]
- case 0x24448B: // 8B 44 24 XX = mov eax, dword ptr [esp+XXh]
- case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
- case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
- case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
- case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
- cursor += 4;
- continue;
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ scanned += info.RegionSize;
+ }
+ return nullptr;
+#else
+ return ::VirtualAlloc(nullptr,
+ granularity,
+ MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+#endif
+}
+
+// Used by unittests to release mapped memory space.
+void TestOnlyReleaseTrampolineRegions() {
+ for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+ TrampolineMemoryRegion *current = &TrampolineRegions[bucket];
+ if (current->content == 0)
+ return;
+ ::VirtualFree((void*)current->content, 0, MEM_RELEASE);
+ current->content = 0;
+ }
+}
+
+static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
+ // Find a region within 2G with enough space to allocate |size| bytes.
+ TrampolineMemoryRegion *region = nullptr;
+ for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+ TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
+ if (current->content == 0) {
+ // No valid region found, allocate a new region.
+ size_t bucket_size = GetMmapGranularity();
+ void *content = AllocateTrampolineRegion(image_address, bucket_size);
+ if (content == nullptr)
+ return 0U;
+
+ current->content = (uptr)content;
+ current->allocated_size = 0;
+ current->max_size = bucket_size;
+ region = current;
+ break;
+ } else if (current->max_size - current->allocated_size > size) {
+#if SANITIZER_WINDOWS64
+ // In 64-bits, the memory space must be allocated within 2G boundary.
+ uptr next_address = current->content + current->allocated_size;
+ if (next_address < image_address ||
+ next_address - image_address >= 0x7FFF0000)
+ continue;
+#endif
+ // The space can be allocated in the current region.
+ region = current;
+ break;
}
+ }
- // Unknown instruction!
- // FIXME: Unknown instruction failures might happen when we add a new
- // interceptor or a new compiler version. In either case, they should result
- // in visible and readable error messages. However, merely calling abort()
- // leads to an infinite recursion in CheckFailed.
- // Do we have a good way to abort with an error message here?
- __debugbreak();
- return 0;
+ // Failed to find a region.
+ if (region == nullptr)
+ return 0U;
+
+ // Allocate the space in the current region.
+ uptr allocated_space = region->content + region->allocated_size;
+ region->allocated_size += size;
+ WritePadding(allocated_space, size);
+
+ return allocated_space;
+}
+
+// Returns 0 on error.
+static size_t GetInstructionSize(uptr address) {
+ switch (*(u64*)address) {
+ case 0x90909090909006EB: // stub: jmp over 6 x nop.
+ return 8;
}
+ switch (*(u8*)address) {
+ case 0x90: // 90 : nop
+ return 1;
+
+ case 0x50: // push eax / rax
+ case 0x51: // push ecx / rcx
+ case 0x52: // push edx / rdx
+ case 0x53: // push ebx / rbx
+ case 0x54: // push esp / rsp
+ case 0x55: // push ebp / rbp
+ case 0x56: // push esi / rsi
+ case 0x57: // push edi / rdi
+ case 0x5D: // pop ebp / rbp
+ return 1;
+
+ case 0x6A: // 6A XX = push XX
+ return 2;
+
+ case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
+ case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
+ case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
+ return 5;
+
+ // Cannot overwrite control-instruction. Return 0 to indicate failure.
+ case 0xE9: // E9 XX XX XX XX : jmp <label>
+ case 0xE8: // E8 XX XX XX XX : call <func>
+ case 0xC3: // C3 : ret
+ case 0xEB: // EB XX : jmp XX (short jump)
+ case 0x70: // 7Y YY : jy XX (short conditional jump)
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78:
+ case 0x79:
+ case 0x7A:
+ case 0x7B:
+ case 0x7C:
+ case 0x7D:
+ case 0x7E:
+ case 0x7F:
+ return 0;
+ }
+
+ switch (*(u16*)(address)) {
+ case 0xFF8B: // 8B FF : mov edi, edi
+ case 0xEC8B: // 8B EC : mov ebp, esp
+ case 0xc889: // 89 C8 : mov eax, ecx
+ case 0xC18B: // 8B C1 : mov eax, ecx
+ case 0xC033: // 33 C0 : xor eax, eax
+ case 0xC933: // 33 C9 : xor ecx, ecx
+ case 0xD233: // 33 D2 : xor edx, edx
+ return 2;
+
+ // Cannot overwrite control-instruction. Return 0 to indicate failure.
+ case 0x25FF: // FF 25 XX XX XX XX : jmp [XXXXXXXX]
+ return 0;
+ }
+
+#if SANITIZER_WINDOWS64
+ switch (*(u16*)address) {
+ case 0x5040: // push rax
+ case 0x5140: // push rcx
+ case 0x5240: // push rdx
+ case 0x5340: // push rbx
+ case 0x5440: // push rsp
+ case 0x5540: // push rbp
+ case 0x5640: // push rsi
+ case 0x5740: // push rdi
+ case 0x5441: // push r12
+ case 0x5541: // push r13
+ case 0x5641: // push r14
+ case 0x5741: // push r15
+ case 0x9066: // Two-byte NOP
+ return 2;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0xe58948: // 48 8b c4 : mov rbp, rsp
+ case 0xc18b48: // 48 8b c1 : mov rax, rcx
+ case 0xc48b48: // 48 8b c4 : mov rax, rsp
+ case 0xd9f748: // 48 f7 d9 : neg rcx
+ case 0xd12b48: // 48 2b d1 : sub rdx, rcx
+ case 0x07c1f6: // f6 c1 07 : test cl, 0x7
+ case 0xc0854d: // 4d 85 c0 : test r8, r8
+ case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
+ case 0xc03345: // 45 33 c0 : xor r8d, r8d
+ case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
+ case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
+ case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
+ case 0xca2b48: // 48 2b ca : sub rcx, rdx
+ case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
+ case 0xc00b4d: // 3d 0b c0 : or r8, r8
+ case 0xd18b48: // 48 8b d1 : mov rdx, rcx
+ case 0xdc8b4c: // 4c 8b dc : mov r11,rsp
+ case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
+ return 3;
+
+ case 0xec8348: // 48 83 ec XX : sub rsp, XX
+ case 0xf88349: // 49 83 f8 XX : cmp r8, XX
+ case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
+ return 4;
+
+ case 0x058b48: // 48 8b 05 XX XX XX XX :
+ // mov rax, QWORD PTR [rip + XXXXXXXX]
+ case 0x25ff48: // 48 ff 25 XX XX XX XX :
+ // rex.W jmp QWORD PTR [rip + XXXXXXXX]
+ return 7;
+ }
+
+ switch (*(u32*)(address)) {
+ case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
+ case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
+ case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
+ case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
+ return 5;
+ }
+
+#else
+
+ switch (*(u16*)address) {
+ case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
+ case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
+ case 0x7D8B: // 8B 7D XX : mov edi, dword ptr [ebp + XX]
+ case 0xEC83: // 83 EC XX : sub esp, XX
+ case 0x75FF: // FF 75 XX : push dword ptr [ebp + XX]
+ return 3;
+ case 0xC1F7: // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX
+ case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]
+ return 6;
+ case 0x3D83: // 83 3D XX YY ZZ WW TT : cmp TT, WWZZYYXX
+ return 7;
+ case 0x7D83: // 83 7D XX YY : cmp dword ptr [ebp + XX], YY
+ return 4;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0x24448A: // 8A 44 24 XX : mov eal, dword ptr [esp + XX]
+ case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
+ case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
+ case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
+ case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
+ case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
+ return 4;
+ }
+
+ switch (*(u32*)address) {
+ case 0x2444B60F: // 0F B6 44 24 XX : movzx eax, byte ptr [esp + XX]
+ return 5;
+ }
+#endif
+
+ // Unknown instruction!
+ // FIXME: Unknown instruction failures might happen when we add a new
+ // interceptor or a new compiler version. In either case, they should result
+ // in visible and readable error messages. However, merely calling abort()
+ // leads to an infinite recursion in CheckFailed.
+ InterceptionFailed();
+ return 0;
+}
+
+// Returns 0 on error.
+static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
+ size_t cursor = 0;
+ while (cursor < size) {
+ size_t instruction_size = GetInstructionSize(address + cursor);
+ if (!instruction_size)
+ return 0;
+ cursor += instruction_size;
+ }
return cursor;
}
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
-#ifdef _WIN64
-#error OverrideFunction is not yet supported on x64
+#if !SANITIZER_WINDOWS64
+bool OverrideFunctionWithDetour(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ const int kDetourHeaderLen = 5;
+ const u16 kDetourInstruction = 0xFF8B;
+
+ uptr header = (uptr)old_func - kDetourHeaderLen;
+ uptr patch_length = kDetourHeaderLen + kShortJumpInstructionLength;
+
+ // Validate that the function is hookable.
+ if (*(u16*)old_func != kDetourInstruction ||
+ !IsMemoryPadding(header, kDetourHeaderLen))
+ return false;
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(header, patch_length, &protection))
+ return false;
+
+ // Write a relative jump to the redirected function.
+ WriteJumpInstruction(header, new_func);
+
+ // Write the short jump to the function prefix.
+ WriteShortJumpInstruction(old_func, header);
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(header, patch_length, protection))
+ return false;
+
+ if (orig_old_func)
+ *orig_old_func = old_func + kShortJumpInstructionLength;
+
+ return true;
+}
+#endif
+
+bool OverrideFunctionWithRedirectJump(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ // Check whether the first instruction is a relative jump.
+ if (*(u8*)old_func != 0xE9)
+ return false;
+
+ if (orig_old_func) {
+ uptr relative_offset = *(u32*)(old_func + 1);
+ uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
+ *orig_old_func = absolute_target;
+ }
+
+#if SANITIZER_WINDOWS64
+ // If needed, get memory space for a trampoline jump.
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, kDirectBranchLength);
+ if (!trampoline)
+ return false;
+ WriteDirectBranch(trampoline, new_func);
#endif
- // Function overriding works basically like this:
- // We write "jmp <new_func>" (5 bytes) at the beginning of the 'old_func'
- // to override it.
- // We might want to be able to execute the original 'old_func' from the
- // wrapper, in this case we need to keep the leading 5+ bytes ('head')
- // of the original code somewhere with a "jmp <old_func+head>".
- // We call these 'head'+5 bytes of instructions a "trampoline".
- char *old_bytes = (char *)old_func;
-
- // We'll need at least 5 bytes for a 'jmp'.
- size_t head = 5;
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(old_func, kJumpInstructionLength, &protection))
+ return false;
+
+ // Write a relative jump to the redirected function.
+ WriteJumpInstruction(old_func, FIRST_32_SECOND_64(new_func, trampoline));
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(old_func, kJumpInstructionLength, protection))
+ return false;
+
+ return true;
+}
+
+bool OverrideFunctionWithHotPatch(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ const int kHotPatchHeaderLen = kBranchLength;
+
+ uptr header = (uptr)old_func - kHotPatchHeaderLen;
+ uptr patch_length = kHotPatchHeaderLen + kShortJumpInstructionLength;
+
+ // Validate that the function is hot patchable.
+ size_t instruction_size = GetInstructionSize(old_func);
+ if (instruction_size < kShortJumpInstructionLength ||
+ !FunctionHasPadding(old_func, kHotPatchHeaderLen))
+ return false;
+
+ if (orig_old_func) {
+ // Put the needed instructions into the trampoline bytes.
+ uptr trampoline_length = instruction_size + kDirectBranchLength;
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
+ if (!trampoline)
+ return false;
+ CopyInstructions(trampoline, old_func, instruction_size);
+ WriteDirectBranch(trampoline + instruction_size,
+ old_func + instruction_size);
+ *orig_old_func = trampoline;
+ }
+
+ // If needed, get memory space for indirect address.
+ uptr indirect_address = 0;
+#if SANITIZER_WINDOWS64
+ indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
+ if (!indirect_address)
+ return false;
+#endif
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(header, patch_length, &protection))
+ return false;
+
+ // Write jumps to the redirected function.
+ WriteBranch(header, indirect_address, new_func);
+ WriteShortJumpInstruction(old_func, header);
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(header, patch_length, protection))
+ return false;
+
+ return true;
+}
+
+bool OverrideFunctionWithTrampoline(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+
+ size_t instructions_length = kBranchLength;
+ size_t padding_length = 0;
+ uptr indirect_address = 0;
+
if (orig_old_func) {
// Find out the number of bytes of the instructions we need to copy
- // to the trampoline and store it in 'head'.
- head = RoundUpToInstrBoundary(head, old_bytes);
- if (!head)
+ // to the trampoline.
+ instructions_length = RoundUpToInstrBoundary(kBranchLength, old_func);
+ if (!instructions_length)
return false;
// Put the needed instructions into the trampoline bytes.
- char *trampoline = GetMemoryForTrampoline(head + 5);
+ uptr trampoline_length = instructions_length + kDirectBranchLength;
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
- _memcpy(trampoline, old_bytes, head);
- WriteJumpInstruction(trampoline + head, old_bytes + head);
- *orig_old_func = (uptr)trampoline;
+ CopyInstructions(trampoline, old_func, instructions_length);
+ WriteDirectBranch(trampoline + instructions_length,
+ old_func + instructions_length);
+ *orig_old_func = trampoline;
}
- // Now put the "jmp <new_func>" instruction at the original code location.
- // We should preserve the EXECUTE flag as some of our own code might be
- // located in the same page (sic!). FIXME: might consider putting the
- // __interception code into a separate section or something?
- DWORD old_prot, unused_prot;
- if (!VirtualProtect((void *)old_bytes, head, PAGE_EXECUTE_READWRITE,
- &old_prot))
+#if SANITIZER_WINDOWS64
+ // Check if the targeted address can be encoded in the function padding.
+ // Otherwise, allocate it in the trampoline region.
+ if (IsMemoryPadding(old_func - kAddressLength, kAddressLength)) {
+ indirect_address = old_func - kAddressLength;
+ padding_length = kAddressLength;
+ } else {
+ indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
+ if (!indirect_address)
+ return false;
+ }
+#endif
+
+ // Change memory protection to writable.
+ uptr patch_address = old_func - padding_length;
+ uptr patch_length = instructions_length + padding_length;
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(patch_address, patch_length, &protection))
return false;
- WriteJumpInstruction(old_bytes, (char *)new_func);
- _memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
+ // Patch the original function.
+ WriteBranch(old_func, indirect_address, new_func);
- // Restore the original permissions.
- if (!VirtualProtect((void *)old_bytes, head, old_prot, &unused_prot))
- return false; // not clear if this failure bothers us.
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(patch_address, patch_length, protection))
+ return false;
return true;
}
+bool OverrideFunction(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+#if !SANITIZER_WINDOWS64
+ if (OverrideFunctionWithDetour(old_func, new_func, orig_old_func))
+ return true;
+#endif
+ if (OverrideFunctionWithRedirectJump(old_func, new_func, orig_old_func))
+ return true;
+ if (OverrideFunctionWithHotPatch(old_func, new_func, orig_old_func))
+ return true;
+ if (OverrideFunctionWithTrampoline(old_func, new_func, orig_old_func))
+ return true;
+ return false;
+}
+
static void **InterestingDLLsAvailable() {
- const char *InterestingDLLs[] = {
- "kernel32.dll",
- "msvcr110.dll", // VS2012
- "msvcr120.dll", // VS2013
- // NTDLL should go last as it exports some functions that we should override
- // in the CRT [presumably only used internally].
- "ntdll.dll", NULL
- };
+ static const char *InterestingDLLs[] = {
+ "kernel32.dll",
+ "msvcr110.dll", // VS2012
+ "msvcr120.dll", // VS2013
+ "vcruntime140.dll", // VS2015
+ "ucrtbase.dll", // Universal CRT
+ // NTDLL should go last as it exports some functions that we should
+ // override in the CRT [presumably only used internally].
+ "ntdll.dll", NULL};
static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
if (!result[0]) {
for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
@@ -268,6 +842,71 @@ bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
return OverrideFunction(orig_func, new_func, orig_old_func);
}
+bool OverrideImportedFunction(const char *module_to_patch,
+ const char *imported_module,
+ const char *function_name, uptr new_function,
+ uptr *orig_old_func) {
+ HMODULE module = GetModuleHandleA(module_to_patch);
+ if (!module)
+ return false;
+
+ // Check that the module header is full and present.
+ RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
+ RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
+ if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
+ headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
+ headers->FileHeader.SizeOfOptionalHeader <
+ sizeof(IMAGE_OPTIONAL_HEADER)) {
+ return false;
+ }
+
+ IMAGE_DATA_DIRECTORY *import_directory =
+ &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
+
+ // Iterate the list of imported DLLs. FirstThunk will be null for the last
+ // entry.
+ RVAPtr<IMAGE_IMPORT_DESCRIPTOR> imports(module,
+ import_directory->VirtualAddress);
+ for (; imports->FirstThunk != 0; ++imports) {
+ RVAPtr<const char> modname(module, imports->Name);
+ if (_stricmp(&*modname, imported_module) == 0)
+ break;
+ }
+ if (imports->FirstThunk == 0)
+ return false;
+
+ // We have two parallel arrays: the import address table (IAT) and the table
+ // of names. They start out containing the same data, but the loader rewrites
+ // the IAT to hold imported addresses and leaves the name table in
+ // OriginalFirstThunk alone.
+ RVAPtr<IMAGE_THUNK_DATA> name_table(module, imports->OriginalFirstThunk);
+ RVAPtr<IMAGE_THUNK_DATA> iat(module, imports->FirstThunk);
+ for (; name_table->u1.Ordinal != 0; ++name_table, ++iat) {
+ if (!IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
+ RVAPtr<IMAGE_IMPORT_BY_NAME> import_by_name(
+ module, name_table->u1.ForwarderString);
+ const char *funcname = &import_by_name->Name[0];
+ if (strcmp(funcname, function_name) == 0)
+ break;
+ }
+ }
+ if (name_table->u1.Ordinal == 0)
+ return false;
+
+ // Now we have the correct IAT entry. Do the swap. We have to make the page
+ // read/write first.
+ if (orig_old_func)
+ *orig_old_func = iat->u1.AddressOfData;
+ DWORD old_prot, unused_prot;
+ if (!VirtualProtect(&iat->u1.AddressOfData, 4, PAGE_EXECUTE_READWRITE,
+ &old_prot))
+ return false;
+ iat->u1.AddressOfData = new_function;
+ if (!VirtualProtect(&iat->u1.AddressOfData, 4, old_prot, &unused_prot))
+ return false; // Not clear if this failure bothers us.
+ return true;
+}
+
} // namespace __interception
#endif // _WIN32
diff --git a/lib/interception/interception_win.h b/lib/interception/interception_win.h
index 96c4a0c0f5a34..9061f9ed4c215 100644
--- a/lib/interception/interception_win.h
+++ b/lib/interception/interception_win.h
@@ -34,6 +34,31 @@ bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
uptr InternalGetProcAddress(void *module, const char *func_name);
+// Overrides a function only when it is called from a specific DLL. For example,
+// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without
+// affecting other third party libraries.
+bool OverrideImportedFunction(const char *module_to_patch,
+ const char *imported_module,
+ const char *function_name, uptr new_function,
+ uptr *orig_old_func);
+
+#if !SANITIZER_WINDOWS64
+// Exposed for unittests
+bool OverrideFunctionWithDetour(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+#endif
+
+// Exposed for unittests
+bool OverrideFunctionWithRedirectJump(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithHotPatch(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithTrampoline(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+
+// Exposed for unittests
+void TestOnlyReleaseTrampolineRegions();
+
} // namespace __interception
#if defined(INTERCEPTION_DYNAMIC_CRT)
@@ -50,5 +75,10 @@ uptr InternalGetProcAddress(void *module, const char *func_name);
#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
+#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func) \
+ ::__interception::OverrideImportedFunction( \
+ user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
+
#endif // INTERCEPTION_WIN_H
#endif // _WIN32
diff --git a/lib/interception/tests/CMakeLists.txt b/lib/interception/tests/CMakeLists.txt
new file mode 100644
index 0000000000000..bfe41fed2fed5
--- /dev/null
+++ b/lib/interception/tests/CMakeLists.txt
@@ -0,0 +1,142 @@
+include(CompilerRTCompile)
+
+filter_available_targets(INTERCEPTION_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el)
+
+set(INTERCEPTION_UNITTESTS
+ interception_linux_test.cc
+ interception_test_main.cc
+ interception_win_test.cc
+)
+
+set(INTERCEPTION_TEST_HEADERS)
+
+set(INTERCEPTION_TEST_CFLAGS_COMMON
+ ${COMPILER_RT_UNITTEST_CFLAGS}
+ ${COMPILER_RT_GTEST_CFLAGS}
+ -I${COMPILER_RT_SOURCE_DIR}/include
+ -I${COMPILER_RT_SOURCE_DIR}/lib
+ -I${COMPILER_RT_SOURCE_DIR}/lib/interception
+ -fno-rtti
+ -O2
+ -Werror=sign-compare
+ -Wno-non-virtual-dtor)
+
+# -gline-tables-only must be enough for these tests, so use it if possible.
+if(COMPILER_RT_TEST_COMPILER_ID MATCHES "Clang")
+ list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -gline-tables-only)
+else()
+ list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -g)
+endif()
+if(MSVC)
+ list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -gcodeview)
+endif()
+list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -g)
+
+if(NOT MSVC)
+ list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON --driver-mode=g++)
+endif()
+
+if(ANDROID)
+ list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -pie)
+endif()
+
+set(INTERCEPTION_TEST_LINK_LIBS)
+append_list_if(COMPILER_RT_HAS_LIBLOG log INTERCEPTION_TEST_LINK_LIBS)
+# NDK r10 requires -latomic almost always.
+append_list_if(ANDROID atomic INTERCEPTION_TEST_LINK_LIBS)
+
+append_list_if(COMPILER_RT_HAS_LIBDL -ldl INTERCEPTION_TEST_LINK_FLAGS_COMMON)
+append_list_if(COMPILER_RT_HAS_LIBRT -lrt INTERCEPTION_TEST_LINK_FLAGS_COMMON)
+append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread INTERCEPTION_TEST_LINK_FLAGS_COMMON)
+# x86_64 FreeBSD 9.2 additionally requires libc++ to build the tests. Also,
+# 'libm' shall be specified explicitly to build i386 tests.
+if(CMAKE_SYSTEM MATCHES "FreeBSD-9.2-RELEASE")
+ list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON "-lc++ -lm")
+endif()
+
+include_directories(..)
+include_directories(../..)
+
+# Adds static library which contains interception object file
+# (universal binary on Mac and arch-specific object files on Linux).
+macro(add_interceptor_lib library)
+ add_library(${library} STATIC ${ARGN})
+ set_target_properties(${library} PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ FOLDER "Compiler-RT Runtime tests")
+endmacro()
+
+function(get_interception_lib_for_arch arch lib lib_name)
+ if(APPLE)
+ set(tgt_name "RTInterception.test.osx")
+ else()
+ set(tgt_name "RTInterception.test.${arch}")
+ endif()
+ set(${lib} "${tgt_name}" PARENT_SCOPE)
+ if(CMAKE_CONFIGURATION_TYPES)
+ set(configuration_path "${CMAKE_CFG_INTDIR}/")
+ else()
+ set(configuration_path "")
+ endif()
+ if(NOT MSVC)
+ set(${lib_name} "${configuration_path}lib${tgt_name}.a" PARENT_SCOPE)
+ else()
+ set(${lib_name} "${configuration_path}${tgt_name}.lib" PARENT_SCOPE)
+ endif()
+endfunction()
+
+# Interception unit tests testsuite.
+add_custom_target(InterceptionUnitTests)
+set_target_properties(InterceptionUnitTests PROPERTIES
+ FOLDER "Compiler-RT Tests")
+
+# Adds interception tests for architecture.
+macro(add_interception_tests_for_arch arch)
+ get_target_flags_for_arch(${arch} TARGET_FLAGS)
+ set(INTERCEPTION_TEST_SOURCES ${INTERCEPTION_UNITTESTS}
+ ${COMPILER_RT_GTEST_SOURCE})
+ set(INTERCEPTION_TEST_COMPILE_DEPS ${INTERCEPTION_TEST_HEADERS})
+ if(NOT COMPILER_RT_STANDALONE_BUILD)
+ list(APPEND INTERCEPTION_TEST_COMPILE_DEPS gtest)
+ endif()
+ set(INTERCEPTION_TEST_OBJECTS)
+ foreach(source ${INTERCEPTION_TEST_SOURCES})
+ get_filename_component(basename ${source} NAME)
+ if(CMAKE_CONFIGURATION_TYPES)
+ set(output_obj "${CMAKE_CFG_INTDIR}/${basename}.${arch}.o")
+ else()
+ set(output_obj "${basename}.${arch}.o")
+ endif()
+ clang_compile(${output_obj} ${source}
+ CFLAGS ${INTERCEPTION_TEST_CFLAGS_COMMON} ${TARGET_FLAGS}
+ DEPS ${INTERCEPTION_TEST_COMPILE_DEPS})
+ list(APPEND INTERCEPTION_TEST_OBJECTS ${output_obj})
+ endforeach()
+ get_interception_lib_for_arch(${arch} INTERCEPTION_COMMON_LIB
+ INTERCEPTION_COMMON_LIB_NAME)
+ # Add unittest target.
+ set(INTERCEPTION_TEST_NAME "Interception-${arch}-Test")
+ add_compiler_rt_test(InterceptionUnitTests ${INTERCEPTION_TEST_NAME}
+ OBJECTS ${INTERCEPTION_TEST_OBJECTS}
+ ${INTERCEPTION_COMMON_LIB_NAME}
+ DEPS ${INTERCEPTION_TEST_OBJECTS} ${INTERCEPTION_COMMON_LIB}
+ LINK_FLAGS ${INTERCEPTION_TEST_LINK_FLAGS_COMMON}
+ ${TARGET_FLAGS})
+endmacro()
+
+if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID AND NOT APPLE)
+ # We use just-built clang to build interception unittests, so we must
+ # be sure that produced binaries would work.
+ if(APPLE)
+ add_interceptor_lib("RTInterception.test.osx"
+ $<TARGET_OBJECTS:RTInterception.osx>)
+ else()
+ foreach(arch ${INTERCEPTION_UNITTEST_SUPPORTED_ARCH})
+ add_interceptor_lib("RTInterception.test.${arch}"
+ $<TARGET_OBJECTS:RTInterception.${arch}>)
+ endforeach()
+ endif()
+ foreach(arch ${INTERCEPTION_UNITTEST_SUPPORTED_ARCH})
+ add_interception_tests_for_arch(${arch})
+ endforeach()
+endif()
diff --git a/lib/interception/tests/interception_linux_test.cc b/lib/interception/tests/interception_linux_test.cc
new file mode 100644
index 0000000000000..4a1ae785d16f4
--- /dev/null
+++ b/lib/interception/tests/interception_linux_test.cc
@@ -0,0 +1,65 @@
+//===-- interception_linux_test.cc ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Tests for interception_linux.h.
+//
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+
+#include "gtest/gtest.h"
+
+// Too slow for debug build
+#if !SANITIZER_DEBUG
+#if SANITIZER_LINUX
+
+static int InterceptorFunctionCalled;
+
+DECLARE_REAL(int, isdigit, int);
+
+INTERCEPTOR(int, isdigit, int d) {
+ ++InterceptorFunctionCalled;
+ return d >= '0' && d <= '9';
+}
+
+namespace __interception {
+
+TEST(Interception, GetRealFunctionAddress) {
+ uptr expected_malloc_address = (uptr)(void*)&malloc;
+ uptr malloc_address = 0;
+ EXPECT_TRUE(GetRealFunctionAddress("malloc", &malloc_address, 0, 0));
+ EXPECT_EQ(expected_malloc_address, malloc_address);
+
+ uptr dummy_address = 0;
+ EXPECT_TRUE(
+ GetRealFunctionAddress("dummy_doesnt_exist__", &dummy_address, 0, 0));
+ EXPECT_EQ(0U, dummy_address);
+}
+
+TEST(Interception, Basic) {
+ ASSERT_TRUE(INTERCEPT_FUNCTION(isdigit));
+
+ // After interception, the counter should be incremented.
+ InterceptorFunctionCalled = 0;
+ EXPECT_NE(0, isdigit('1'));
+ EXPECT_EQ(1, InterceptorFunctionCalled);
+ EXPECT_EQ(0, isdigit('a'));
+ EXPECT_EQ(2, InterceptorFunctionCalled);
+
+ // Calling the REAL function should not affect the counter.
+ InterceptorFunctionCalled = 0;
+ EXPECT_NE(0, REAL(isdigit)('1'));
+ EXPECT_EQ(0, REAL(isdigit)('a'));
+ EXPECT_EQ(0, InterceptorFunctionCalled);
+}
+
+} // namespace __interception
+
+#endif // SANITIZER_LINUX
+#endif // #if !SANITIZER_DEBUG
diff --git a/lib/interception/tests/interception_test_main.cc b/lib/interception/tests/interception_test_main.cc
new file mode 100644
index 0000000000000..311da51ecfcec
--- /dev/null
+++ b/lib/interception/tests/interception_test_main.cc
@@ -0,0 +1,22 @@
+//===-- interception_test_main.cc------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Testing the machinery for providing replacements/wrappers for system
+// functions.
+//===----------------------------------------------------------------------===//
+
+#include "gtest/gtest.h"
+
+int main(int argc, char **argv) {
+ testing::GTEST_FLAG(death_test_style) = "threadsafe";
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/lib/interception/tests/interception_win_test.cc b/lib/interception/tests/interception_win_test.cc
new file mode 100644
index 0000000000000..611354f03d12b
--- /dev/null
+++ b/lib/interception/tests/interception_win_test.cc
@@ -0,0 +1,592 @@
+//===-- interception_win_test.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Tests for interception_win.h.
+//
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+
+#include "gtest/gtest.h"
+
+// Too slow for debug build
+#if !SANITIZER_DEBUG
+#if SANITIZER_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+namespace __interception {
+namespace {
+
+enum FunctionPrefixKind {
+ FunctionPrefixNone,
+ FunctionPrefixPadding,
+ FunctionPrefixHotPatch,
+ FunctionPrefixDetour,
+};
+
+typedef bool (*TestOverrideFunction)(uptr, uptr, uptr*);
+typedef int (*IdentityFunction)(int);
+
+#if SANITIZER_WINDOWS64
+
+const u8 kIdentityCodeWithPrologue[] = {
+ 0x55, // push rbp
+ 0x48, 0x89, 0xE5, // mov rbp,rsp
+ 0x8B, 0xC1, // mov eax,ecx
+ 0x5D, // pop rbp
+ 0xC3, // ret
+};
+
+const u8 kIdentityCodeWithPushPop[] = {
+ 0x55, // push rbp
+ 0x48, 0x89, 0xE5, // mov rbp,rsp
+ 0x53, // push rbx
+ 0x50, // push rax
+ 0x58, // pop rax
+    0x8B, 0xC1,             // mov eax,ecx
+ 0x5B, // pop rbx
+ 0x5D, // pop rbp
+ 0xC3, // ret
+};
+
+const u8 kIdentityTwiceOffset = 16;
+const u8 kIdentityTwice[] = {
+ 0x55, // push rbp
+ 0x48, 0x89, 0xE5, // mov rbp,rsp
+ 0x8B, 0xC1, // mov eax,ecx
+ 0x5D, // pop rbp
+ 0xC3, // ret
+ 0x90, 0x90, 0x90, 0x90,
+ 0x90, 0x90, 0x90, 0x90,
+ 0x55, // push rbp
+ 0x48, 0x89, 0xE5, // mov rbp,rsp
+ 0x8B, 0xC1, // mov eax,ecx
+ 0x5D, // pop rbp
+ 0xC3, // ret
+};
+
+const u8 kIdentityCodeWithMov[] = {
+ 0x89, 0xC8, // mov eax, ecx
+ 0xC3, // ret
+};
+
+const u8 kIdentityCodeWithJump[] = {
+ 0xE9, 0x04, 0x00, 0x00,
+ 0x00, // jmp + 4
+ 0xCC, 0xCC, 0xCC, 0xCC,
+ 0x89, 0xC8, // mov eax, ecx
+ 0xC3, // ret
+};
+
+#else
+
+const u8 kIdentityCodeWithPrologue[] = {
+ 0x55, // push ebp
+ 0x8B, 0xEC, // mov ebp,esp
+ 0x8B, 0x45, 0x08, // mov eax,dword ptr [ebp + 8]
+ 0x5D, // pop ebp
+ 0xC3, // ret
+};
+
+const u8 kIdentityCodeWithPushPop[] = {
+ 0x55, // push ebp
+ 0x8B, 0xEC, // mov ebp,esp
+ 0x53, // push ebx
+ 0x50, // push eax
+ 0x58, // pop eax
+ 0x8B, 0x45, 0x08, // mov eax,dword ptr [ebp + 8]
+ 0x5B, // pop ebx
+ 0x5D, // pop ebp
+ 0xC3, // ret
+};
+
+const u8 kIdentityTwiceOffset = 8;
+const u8 kIdentityTwice[] = {
+ 0x55, // push ebp
+ 0x8B, 0xEC, // mov ebp,esp
+ 0x8B, 0x45, 0x08, // mov eax,dword ptr [ebp + 8]
+ 0x5D, // pop ebp
+ 0xC3, // ret
+ 0x55, // push ebp
+ 0x8B, 0xEC, // mov ebp,esp
+ 0x8B, 0x45, 0x08, // mov eax,dword ptr [ebp + 8]
+ 0x5D, // pop ebp
+ 0xC3, // ret
+};
+
+const u8 kIdentityCodeWithMov[] = {
+ 0x8B, 0x44, 0x24, 0x04, // mov eax,dword ptr [esp + 4]
+ 0xC3, // ret
+};
+
+const u8 kIdentityCodeWithJump[] = {
+ 0xE9, 0x04, 0x00, 0x00,
+ 0x00, // jmp + 4
+ 0xCC, 0xCC, 0xCC, 0xCC,
+ 0x8B, 0x44, 0x24, 0x04, // mov eax,dword ptr [esp + 4]
+ 0xC3, // ret
+};
+
+#endif
+
+const u8 kPatchableCode1[] = {
+ 0xB8, 0x4B, 0x00, 0x00, 0x00, // mov eax,4B
+ 0x33, 0xC9, // xor ecx,ecx
+ 0xC3, // ret
+};
+
+const u8 kPatchableCode2[] = {
+ 0x55, // push ebp
+ 0x8B, 0xEC, // mov ebp,esp
+ 0x33, 0xC0, // xor eax,eax
+ 0x5D, // pop ebp
+ 0xC3, // ret
+};
+
+const u8 kPatchableCode3[] = {
+ 0x55, // push ebp
+ 0x8B, 0xEC, // mov ebp,esp
+ 0x6A, 0x00, // push 0
+ 0xE8, 0x3D, 0xFF, 0xFF, 0xFF, // call <func>
+};
+
+const u8 kPatchableCode4[] = {
+ 0xE9, 0xCC, 0xCC, 0xCC, 0xCC, // jmp <label>
+ 0x90, 0x90, 0x90, 0x90,
+};
+
+const u8 kUnpatchableCode1[] = {
+ 0xC3, // ret
+};
+
+const u8 kUnpatchableCode2[] = {
+ 0x33, 0xC9, // xor ecx,ecx
+ 0xC3, // ret
+};
+
+const u8 kUnpatchableCode3[] = {
+ 0x75, 0xCC, // jne <label>
+ 0x33, 0xC9, // xor ecx,ecx
+ 0xC3, // ret
+};
+
+const u8 kUnpatchableCode4[] = {
+    0x74, 0xCC,                   // je <label>
+ 0x33, 0xC9, // xor ecx,ecx
+ 0xC3, // ret
+};
+
+const u8 kUnpatchableCode5[] = {
+ 0xEB, 0x02, // jmp <label>
+ 0x33, 0xC9, // xor ecx,ecx
+ 0xC3, // ret
+};
+
+const u8 kUnpatchableCode6[] = {
+ 0xE8, 0xCC, 0xCC, 0xCC, 0xCC, // call <func>
+ 0x90, 0x90, 0x90, 0x90,
+};
+
+// A buffer holding the dynamically generated code under test.
+u8* ActiveCode;
+size_t ActiveCodeLength = 4096;
+
+template<class T>
+static void LoadActiveCode(
+ const T &code,
+ uptr *entry_point,
+ FunctionPrefixKind prefix_kind = FunctionPrefixNone) {
+ if (ActiveCode == nullptr) {
+ ActiveCode =
+ (u8*)::VirtualAlloc(nullptr, ActiveCodeLength,
+ MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE);
+ ASSERT_NE(ActiveCode, nullptr);
+ }
+
+ size_t position = 0;
+
+ // Add padding to avoid memory violation when scanning the prefix.
+ for (int i = 0; i < 16; ++i)
+ ActiveCode[position++] = 0xC3; // Instruction 'ret'.
+
+ // Add function padding.
+ size_t padding = 0;
+ if (prefix_kind == FunctionPrefixPadding)
+ padding = 16;
+ else if (prefix_kind == FunctionPrefixDetour ||
+ prefix_kind == FunctionPrefixHotPatch)
+ padding = FIRST_32_SECOND_64(5, 6);
+ // Insert |padding| instructions 'nop'.
+ for (size_t i = 0; i < padding; ++i)
+ ActiveCode[position++] = 0x90;
+
+ // Keep track of the entry point.
+ *entry_point = (uptr)&ActiveCode[position];
+
+ // Add the detour instruction (i.e. mov edi, edi)
+ if (prefix_kind == FunctionPrefixDetour) {
+#if SANITIZER_WINDOWS64
+ // Note that "mov edi,edi" is NOP in 32-bit only, in 64-bit it clears
+ // higher bits of RDI.
+ // Use 66,90H as NOP for Windows64.
+ ActiveCode[position++] = 0x66;
+ ActiveCode[position++] = 0x90;
+#else
+ // mov edi,edi.
+ ActiveCode[position++] = 0x8B;
+ ActiveCode[position++] = 0xFF;
+#endif
+
+ }
+
+ // Copy the function body.
+ for (size_t i = 0; i < sizeof(T); ++i)
+ ActiveCode[position++] = code[i];
+}
+
+int InterceptorFunctionCalled;
+IdentityFunction InterceptedRealFunction;
+
+int InterceptorFunction(int x) {
+ ++InterceptorFunctionCalled;
+ return InterceptedRealFunction(x);
+}
+
+} // namespace
+
+// Tests for interception_win.h
+TEST(Interception, InternalGetProcAddress) {
+ HMODULE ntdll_handle = ::GetModuleHandle("ntdll");
+ ASSERT_NE(nullptr, ntdll_handle);
+  uptr DbgPrint_expected = (uptr)::GetProcAddress(ntdll_handle, "DbgPrint");
+  uptr isdigit_expected = (uptr)::GetProcAddress(ntdll_handle, "isdigit");
+  uptr DbgPrint_address = InternalGetProcAddress(ntdll_handle, "DbgPrint");
+  uptr isdigit_address = InternalGetProcAddress(ntdll_handle, "isdigit");
+
+  EXPECT_EQ(DbgPrint_expected, DbgPrint_address);
+  EXPECT_EQ(isdigit_expected, isdigit_address);
+  EXPECT_NE(DbgPrint_address, isdigit_address);
+}
+
+template<class T>
+static void TestIdentityFunctionPatching(
+ const T &code,
+ TestOverrideFunction override,
+ FunctionPrefixKind prefix_kind = FunctionPrefixNone) {
+ uptr identity_address;
+ LoadActiveCode(code, &identity_address, prefix_kind);
+ IdentityFunction identity = (IdentityFunction)identity_address;
+
+ // Validate behavior before dynamic patching.
+ InterceptorFunctionCalled = 0;
+ EXPECT_EQ(0, identity(0));
+ EXPECT_EQ(42, identity(42));
+ EXPECT_EQ(0, InterceptorFunctionCalled);
+
+ // Patch the function.
+ uptr real_identity_address = 0;
+ bool success = override(identity_address,
+ (uptr)&InterceptorFunction,
+ &real_identity_address);
+ EXPECT_TRUE(success);
+ EXPECT_NE(0U, real_identity_address);
+ IdentityFunction real_identity = (IdentityFunction)real_identity_address;
+ InterceptedRealFunction = real_identity;
+
+ // Don't run tests if hooking failed or the real function is not valid.
+ if (!success || !real_identity_address)
+ return;
+
+ // Calling the redirected function.
+ InterceptorFunctionCalled = 0;
+ EXPECT_EQ(0, identity(0));
+ EXPECT_EQ(42, identity(42));
+ EXPECT_EQ(2, InterceptorFunctionCalled);
+
+ // Calling the real function.
+ InterceptorFunctionCalled = 0;
+ EXPECT_EQ(0, real_identity(0));
+ EXPECT_EQ(42, real_identity(42));
+ EXPECT_EQ(0, InterceptorFunctionCalled);
+
+ TestOnlyReleaseTrampolineRegions();
+}
+
+#if !SANITIZER_WINDOWS64
+TEST(Interception, OverrideFunctionWithDetour) {
+ TestOverrideFunction override = OverrideFunctionWithDetour;
+ FunctionPrefixKind prefix = FunctionPrefixDetour;
+ TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);
+}
+#endif // !SANITIZER_WINDOWS64
+
+TEST(Interception, OverrideFunctionWithRedirectJump) {
+ TestOverrideFunction override = OverrideFunctionWithRedirectJump;
+ TestIdentityFunctionPatching(kIdentityCodeWithJump, override);
+}
+
+TEST(Interception, OverrideFunctionWithHotPatch) {
+ TestOverrideFunction override = OverrideFunctionWithHotPatch;
+ FunctionPrefixKind prefix = FunctionPrefixHotPatch;
+ TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);
+}
+
+TEST(Interception, OverrideFunctionWithTrampoline) {
+ TestOverrideFunction override = OverrideFunctionWithTrampoline;
+ FunctionPrefixKind prefix = FunctionPrefixNone;
+ TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);
+
+ prefix = FunctionPrefixPadding;
+ TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);
+}
+
+TEST(Interception, OverrideFunction) {
+ TestOverrideFunction override = OverrideFunction;
+ FunctionPrefixKind prefix = FunctionPrefixNone;
+ TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);
+
+ prefix = FunctionPrefixPadding;
+ TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);
+
+ prefix = FunctionPrefixHotPatch;
+ TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);
+
+ prefix = FunctionPrefixDetour;
+ TestIdentityFunctionPatching(kIdentityCodeWithPrologue, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithPushPop, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithMov, override, prefix);
+ TestIdentityFunctionPatching(kIdentityCodeWithJump, override, prefix);
+}
+
+template<class T>
+static void TestIdentityFunctionMultiplePatching(
+ const T &code,
+ TestOverrideFunction override,
+ FunctionPrefixKind prefix_kind = FunctionPrefixNone) {
+ uptr identity_address;
+ LoadActiveCode(code, &identity_address, prefix_kind);
+
+ // Patch the function.
+ uptr real_identity_address = 0;
+ bool success = override(identity_address,
+ (uptr)&InterceptorFunction,
+ &real_identity_address);
+ EXPECT_TRUE(success);
+ EXPECT_NE(0U, real_identity_address);
+
+ // Re-patching the function should not work.
+ success = override(identity_address,
+ (uptr)&InterceptorFunction,
+ &real_identity_address);
+ EXPECT_FALSE(success);
+
+ TestOnlyReleaseTrampolineRegions();
+}
+
+TEST(Interception, OverrideFunctionMultiplePatchingIsFailing) {
+#if !SANITIZER_WINDOWS64
+ TestIdentityFunctionMultiplePatching(kIdentityCodeWithPrologue,
+ OverrideFunctionWithDetour,
+ FunctionPrefixDetour);
+#endif
+
+ TestIdentityFunctionMultiplePatching(kIdentityCodeWithMov,
+ OverrideFunctionWithHotPatch,
+ FunctionPrefixHotPatch);
+
+ TestIdentityFunctionMultiplePatching(kIdentityCodeWithPushPop,
+ OverrideFunctionWithTrampoline,
+ FunctionPrefixPadding);
+}
+
+TEST(Interception, OverrideFunctionTwice) {
+ uptr identity_address1;
+ LoadActiveCode(kIdentityTwice, &identity_address1);
+ uptr identity_address2 = identity_address1 + kIdentityTwiceOffset;
+ IdentityFunction identity1 = (IdentityFunction)identity_address1;
+ IdentityFunction identity2 = (IdentityFunction)identity_address2;
+
+ // Patch the two functions.
+ uptr real_identity_address = 0;
+ EXPECT_TRUE(OverrideFunction(identity_address1,
+ (uptr)&InterceptorFunction,
+ &real_identity_address));
+ EXPECT_TRUE(OverrideFunction(identity_address2,
+ (uptr)&InterceptorFunction,
+ &real_identity_address));
+ IdentityFunction real_identity = (IdentityFunction)real_identity_address;
+ InterceptedRealFunction = real_identity;
+
+ // Calling the redirected function.
+ InterceptorFunctionCalled = 0;
+ EXPECT_EQ(42, identity1(42));
+ EXPECT_EQ(42, identity2(42));
+ EXPECT_EQ(2, InterceptorFunctionCalled);
+
+ TestOnlyReleaseTrampolineRegions();
+}
+
+template<class T>
+static bool TestFunctionPatching(
+ const T &code,
+ TestOverrideFunction override,
+ FunctionPrefixKind prefix_kind = FunctionPrefixNone) {
+ uptr address;
+ LoadActiveCode(code, &address, prefix_kind);
+ uptr unused_real_address = 0;
+ bool result = override(
+ address, (uptr)&InterceptorFunction, &unused_real_address);
+
+ TestOnlyReleaseTrampolineRegions();
+ return result;
+}
+
+TEST(Interception, PatchableFunction) {
+ TestOverrideFunction override = OverrideFunction;
+ // Test without function padding.
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override));
+#if SANITIZER_WINDOWS64
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override));
+#else
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override));
+#endif
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override));
+
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override));
+}
+
+#if !SANITIZER_WINDOWS64
+TEST(Interception, PatchableFunctionWithDetour) {
+ TestOverrideFunction override = OverrideFunctionWithDetour;
+ // Without the prefix, no function can be detoured.
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode1, override));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode2, override));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode4, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override));
+
+ // With the prefix, all functions can be detoured.
+ FunctionPrefixKind prefix = FunctionPrefixDetour;
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode1, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode2, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode3, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode4, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode5, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode6, override, prefix));
+}
+#endif // !SANITIZER_WINDOWS64
+
+TEST(Interception, PatchableFunctionWithRedirectJump) {
+ TestOverrideFunction override = OverrideFunctionWithRedirectJump;
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode1, override));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode2, override));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override));
+}
+
+TEST(Interception, PatchableFunctionWithHotPatch) {
+ TestOverrideFunction override = OverrideFunctionWithHotPatch;
+ FunctionPrefixKind prefix = FunctionPrefixHotPatch;
+
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode2, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode4, override, prefix));
+
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode2, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override, prefix));
+}
+
+TEST(Interception, PatchableFunctionWithTrampoline) {
+ TestOverrideFunction override = OverrideFunctionWithTrampoline;
+ FunctionPrefixKind prefix = FunctionPrefixPadding;
+
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override, prefix));
+#if SANITIZER_WINDOWS64
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override, prefix));
+#else
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override, prefix));
+#endif
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode4, override, prefix));
+
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override, prefix));
+}
+
+TEST(Interception, PatchableFunctionPadding) {
+ TestOverrideFunction override = OverrideFunction;
+ FunctionPrefixKind prefix = FunctionPrefixPadding;
+
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode1, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode2, override, prefix));
+#if SANITIZER_WINDOWS64
+ EXPECT_FALSE(TestFunctionPatching(kPatchableCode3, override, prefix));
+#else
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override, prefix));
+#endif
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override, prefix));
+
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override, prefix));
+ EXPECT_TRUE(TestFunctionPatching(kUnpatchableCode2, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode4, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode5, override, prefix));
+ EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode6, override, prefix));
+}
+
+} // namespace __interception
+
+#endif // SANITIZER_WINDOWS
+#endif // #if !SANITIZER_DEBUG