Diffstat (limited to 'lib/xray/xray_trampoline_x86_64.S')
-rw-r--r-- | lib/xray/xray_trampoline_x86_64.S | 104
1 file changed, 51 insertions, 53 deletions
diff --git a/lib/xray/xray_trampoline_x86_64.S b/lib/xray/xray_trampoline_x86_64.S
index da0aae326bdc9..847ecef8d4250 100644
--- a/lib/xray/xray_trampoline_x86_64.S
+++ b/lib/xray/xray_trampoline_x86_64.S
@@ -16,41 +16,48 @@
 #include "../builtins/assembly.h"
 
 .macro SAVE_REGISTERS
-  subq $200, %rsp
-  movupd %xmm0, 184(%rsp)
-  movupd %xmm1, 168(%rsp)
-  movupd %xmm2, 152(%rsp)
-  movupd %xmm3, 136(%rsp)
-  movupd %xmm4, 120(%rsp)
-  movupd %xmm5, 104(%rsp)
-  movupd %xmm6, 88(%rsp)
-  movupd %xmm7, 72(%rsp)
-  movq %rdi, 64(%rsp)
-  movq %rax, 56(%rsp)
-  movq %rdx, 48(%rsp)
-  movq %rsi, 40(%rsp)
-  movq %rcx, 32(%rsp)
-  movq %r8, 24(%rsp)
-  movq %r9, 16(%rsp)
+  subq $192, %rsp
+  .cfi_def_cfa_offset 200
+  // At this point, the stack pointer should be aligned to an 8-byte boundary,
+  // because any call instructions that come after this will add another 8
+  // bytes and therefore align it to 16-bytes.
+  movq %rbp, 184(%rsp)
+  movupd %xmm0, 168(%rsp)
+  movupd %xmm1, 152(%rsp)
+  movupd %xmm2, 136(%rsp)
+  movupd %xmm3, 120(%rsp)
+  movupd %xmm4, 104(%rsp)
+  movupd %xmm5, 88(%rsp)
+  movupd %xmm6, 72(%rsp)
+  movupd %xmm7, 56(%rsp)
+  movq %rdi, 48(%rsp)
+  movq %rax, 40(%rsp)
+  movq %rdx, 32(%rsp)
+  movq %rsi, 24(%rsp)
+  movq %rcx, 16(%rsp)
+  movq %r8, 8(%rsp)
+  movq %r9, 0(%rsp)
 .endm
 
 .macro RESTORE_REGISTERS
-  movupd 184(%rsp), %xmm0
-  movupd 168(%rsp), %xmm1
-  movupd 152(%rsp), %xmm2
-  movupd 136(%rsp), %xmm3
-  movupd 120(%rsp), %xmm4
-  movupd 104(%rsp), %xmm5
-  movupd 88(%rsp) , %xmm6
-  movupd 72(%rsp) , %xmm7
-  movq 64(%rsp), %rdi
-  movq 56(%rsp), %rax
-  movq 48(%rsp), %rdx
-  movq 40(%rsp), %rsi
-  movq 32(%rsp), %rcx
-  movq 24(%rsp), %r8
-  movq 16(%rsp), %r9
-  addq $200, %rsp
+  movq 184(%rsp), %rbp
+  movupd 168(%rsp), %xmm0
+  movupd 152(%rsp), %xmm1
+  movupd 136(%rsp), %xmm2
+  movupd 120(%rsp), %xmm3
+  movupd 104(%rsp), %xmm4
+  movupd 88(%rsp), %xmm5
+  movupd 72(%rsp) , %xmm6
+  movupd 56(%rsp) , %xmm7
+  movq 48(%rsp), %rdi
+  movq 40(%rsp), %rax
+  movq 32(%rsp), %rdx
+  movq 24(%rsp), %rsi
+  movq 16(%rsp), %rcx
+  movq 8(%rsp), %r8
+  movq 0(%rsp), %r9
+  addq $192, %rsp
+  .cfi_def_cfa_offset 8
 .endm
 
 .text
@@ -64,8 +71,6 @@
 
 __xray_FunctionEntry:
   .cfi_startproc
-  pushq %rbp
-  .cfi_def_cfa_offset 16
   SAVE_REGISTERS
 
   // This load has to be atomic, it's concurrent with __xray_patch().
@@ -80,7 +85,6 @@ __xray_FunctionEntry:
   callq *%rax
 .Ltmp0:
   RESTORE_REGISTERS
-  popq %rbp
   retq
 .Ltmp1:
   .size __xray_FunctionEntry, .Ltmp1-__xray_FunctionEntry
@@ -96,14 +100,13 @@ __xray_FunctionExit:
   // Save the important registers first. Since we're assuming that this
   // function is only jumped into, we only preserve the registers for
   // returning.
-  pushq %rbp
-  .cfi_def_cfa_offset 16
   subq $56, %rsp
-  .cfi_def_cfa_offset 32
-  movupd %xmm0, 40(%rsp)
-  movupd %xmm1, 24(%rsp)
-  movq %rax, 16(%rsp)
-  movq %rdx, 8(%rsp)
+  .cfi_def_cfa_offset 64
+  movq %rbp, 48(%rsp)
+  movupd %xmm0, 32(%rsp)
+  movupd %xmm1, 16(%rsp)
+  movq %rax, 8(%rsp)
+  movq %rdx, 0(%rsp)
   movq _ZN6__xray19XRayPatchedFunctionE(%rip), %rax
   testq %rax,%rax
   je .Ltmp2
@@ -113,12 +116,13 @@ __xray_FunctionExit:
   callq *%rax
 .Ltmp2:
   // Restore the important registers.
-  movupd 40(%rsp), %xmm0
-  movupd 24(%rsp), %xmm1
-  movq 16(%rsp), %rax
-  movq 8(%rsp), %rdx
+  movq 48(%rsp), %rbp
+  movupd 32(%rsp), %xmm0
+  movupd 16(%rsp), %xmm1
+  movq 8(%rsp), %rax
+  movq 0(%rsp), %rdx
   addq $56, %rsp
-  popq %rbp
+  .cfi_def_cfa_offset 8
   retq
 .Ltmp3:
   .size __xray_FunctionExit, .Ltmp3-__xray_FunctionExit
@@ -135,8 +139,6 @@ __xray_FunctionTailExit:
   // this is an exit. In the future, we will introduce a new entry type that
   // differentiates between a normal exit and a tail exit, but we'd have to do
   // this and increment the version number for the header.
-  pushq %rbp
-  .cfi_def_cfa_offset 16
   SAVE_REGISTERS
 
   movq _ZN6__xray19XRayPatchedFunctionE(%rip), %rax
@@ -149,7 +151,6 @@ __xray_FunctionTailExit:
 
 .Ltmp4:
   RESTORE_REGISTERS
-  popq %rbp
   retq
 .Ltmp5:
   .size __xray_FunctionTailExit, .Ltmp5-__xray_FunctionTailExit
@@ -162,8 +163,6 @@ __xray_FunctionTailExit:
   .type __xray_ArgLoggerEntry,@function
 __xray_ArgLoggerEntry:
   .cfi_startproc
-  pushq %rbp
-  .cfi_def_cfa_offset 16
   SAVE_REGISTERS
 
   // Again, these function pointer loads must be atomic; MOV is fine.
@@ -184,7 +183,6 @@ __xray_ArgLoggerEntry:
 
 .Larg1entryFail:
   RESTORE_REGISTERS
-  popq %rbp
   retq
 
 .Larg1entryEnd:
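For readers tracing the new offsets: the patch folds the old pushq %rbp into the frame itself (the movq %rbp, 184(%rsp) slot), so each trampoline adjusts %rsp exactly once, and the paired .cfi_def_cfa_offset directives now describe that frame to unwinders: 200 while the 192-byte frame is live (192 plus the 8-byte return address), then back to 8 once it is torn down. Below is a minimal standalone sketch of the same save/call/restore shape, not the file's actual code; demo_handler and demo_trampoline are hypothetical names, and it assumes assembly as a preprocessed .S file (as the original is), so the // comments are stripped before the assembler runs:

// demo_trampoline.S -- illustrative sketch only; all names are hypothetical.
    .data
    .balign 8                      // keep the slot 8-byte aligned so plain
demo_handler:                      // MOV loads/stores of it are atomic
    .quad 0

    .text
    .globl demo_trampoline
    .type demo_trampoline,@function
demo_trampoline:
    .cfi_startproc
    subq $192, %rsp                // one adjustment replaces pushq %rbp + subq
    .cfi_def_cfa_offset 200        // CFA = %rsp + 192 (frame) + 8 (return addr)
    movq %rbp, 184(%rsp)           // %rbp lives in the frame, not on a push
    movq %rdi, 48(%rsp)            // ...remaining saves as in SAVE_REGISTERS...
    movq demo_handler(%rip), %rax  // atomic 8-byte load, as in the real file
    testq %rax, %rax
    je .Ldemo_skip                 // no handler installed; do nothing
    callq *%rax                    // the call pushes 8 more bytes, so the
                                   // handler sees a 16-byte-aligned stack
.Ldemo_skip:
    movq 48(%rsp), %rdi
    movq 184(%rsp), %rbp
    addq $192, %rsp
    .cfi_def_cfa_offset 8          // only the return address remains
    retq
    .cfi_endproc
    .size demo_trampoline, .-demo_trampoline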
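The exit path applies the same arithmetic to a smaller frame: __xray_FunctionExit only needs the return-value registers (%rax, %rdx, %xmm0, %xmm1) plus the %rbp slot, so 56 bytes suffice and the new .cfi_def_cfa_offset 64 is simply 56 + 8. The change also corrects an inconsistency visible in the removed lines: after pushq %rbp (CFA offset 16) and subq $56, %rsp, the CFA offset should have been 72, so the old .cfi_def_cfa_offset 32 misdescribed the frame to unwinders. Note too that the loads of _ZN6__xray19XRayPatchedFunctionE remain plain movq instructions; as the in-file comments say, that is enough, since a naturally aligned 8-byte load is atomic on x86_64 and the only concurrency is with __xray_patch() swapping the pointer.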