Diffstat (limited to 'test/CodeGen/X86/movtopush.ll')
-rw-r--r-- | test/CodeGen/X86/movtopush.ll | 93
1 file changed, 53 insertions, 40 deletions
diff --git a/test/CodeGen/X86/movtopush.ll b/test/CodeGen/X86/movtopush.ll
index f89e52457f35..b02f9ec45e7f 100644
--- a/test/CodeGen/X86/movtopush.ll
+++ b/test/CodeGen/X86/movtopush.ll
@@ -2,11 +2,15 @@
 ; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
 ; RUN: llc < %s -mtriple=i686-windows -force-align-stack -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
 
+%class.Class = type { i32 }
+%struct.s = type { i64 }
+
 declare void @good(i32 %a, i32 %b, i32 %c, i32 %d)
 declare void @inreg(i32 %a, i32 inreg %b, i32 %c, i32 %d)
+declare x86_thiscallcc void @thiscall(%class.Class* %class, i32 %a, i32 %b, i32 %c, i32 %d)
 declare void @oneparam(i32 %a)
 declare void @eightparams(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
-
+declare void @struct(%struct.s* byval %a, i32 %b, i32 %c, i32 %d)
 
 ; Here, we should have a reserved frame, so we don't expect pushes
 ; NORMAL-LABEL: test1:
@@ -108,13 +112,12 @@ entry:
   ret void
 }
 
-; We don't support weird calling conventions
+; We support weird calling conventions
 ; NORMAL-LABEL: test4:
-; NORMAL: subl $12, %esp
-; NORMAL-NEXT: movl $4, 8(%esp)
-; NORMAL-NEXT: movl $3, 4(%esp)
-; NORMAL-NEXT: movl $1, (%esp)
-; NORMAL-NEXT: movl $2, %eax
+; NORMAL: movl $2, %eax
+; NORMAL-NEXT: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $1
 ; NORMAL-NEXT: call
 ; NORMAL-NEXT: addl $12, %esp
 define void @test4() optsize {
@@ -123,6 +126,20 @@ entry:
   ret void
 }
 
+; NORMAL-LABEL: test4b:
+; NORMAL: movl 4(%esp), %ecx
+; NORMAL-NEXT: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+; NORMAL-NEXT: ret
+define void @test4b(%class.Class* %f) optsize {
+entry:
+  call x86_thiscallcc void @thiscall(%class.Class* %f, i32 1, i32 2, i32 3, i32 4)
+  ret void
+}
+
 ; When there is no reserved call frame, check that additional alignment
 ; is added when the pushes don't add up to the required alignment.
 ; ALIGNED-LABEL: test5:
@@ -229,20 +246,27 @@ entry:
 ; NORMAL-NEXT: pushl $1
 ; NORMAL-NEXT: call
 ; NORMAL-NEXT: addl $16, %esp
-; NORMAL-NEXT: subl $16, %esp
-; NORMAL-NEXT: leal 16(%esp), [[EAX:%e..]]
-; NORMAL-NEXT: movl [[EAX]], 12(%esp)
-; NORMAL-NEXT: movl $7, 8(%esp)
-; NORMAL-NEXT: movl $6, 4(%esp)
-; NORMAL-NEXT: movl $5, (%esp)
+; NORMAL-NEXT: subl $20, %esp
+; NORMAL-NEXT: movl 20(%esp), [[E1:%e..]]
+; NORMAL-NEXT: movl 24(%esp), [[E2:%e..]]
+; NORMAL-NEXT: movl [[E2]], 4(%esp)
+; NORMAL-NEXT: movl [[E1]], (%esp)
+; NORMAL-NEXT: leal 32(%esp), [[E3:%e..]]
+; NORMAL-NEXT: movl [[E3]], 16(%esp)
+; NORMAL-NEXT: leal 28(%esp), [[E4:%e..]]
+; NORMAL-NEXT: movl [[E4]], 12(%esp)
+; NORMAL-NEXT: movl $6, 8(%esp)
 ; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
+; NORMAL-NEXT: addl $20, %esp
 define void @test9() optsize {
 entry:
   %p = alloca i32, align 4
+  %q = alloca i32, align 4
+  %s = alloca %struct.s, align 4
   call void @good(i32 1, i32 2, i32 3, i32 4)
-  %0 = ptrtoint i32* %p to i32
-  call void @good(i32 5, i32 6, i32 7, i32 %0)
+  %pv = ptrtoint i32* %p to i32
+  %qv = ptrtoint i32* %q to i32
+  call void @struct(%struct.s* byval %s, i32 6, i32 %qv, i32 %pv)
   ret void
 }
 
@@ -291,28 +315,17 @@ define void @test11() optsize {
 ; Converting one mov into a push isn't worth it when
 ; doing so forces too much overhead for other calls.
 ; NORMAL-LABEL: test12:
-; NORMAL: subl $16, %esp
-; NORMAL-NEXT: movl $4, 8(%esp)
-; NORMAL-NEXT: movl $3, 4(%esp)
-; NORMAL-NEXT: movl $1, (%esp)
-; NORMAL-NEXT: movl $2, %eax
-; NORMAL-NEXT: calll _inreg
-; NORMAL-NEXT: movl $8, 12(%esp)
+; NORMAL: movl $8, 12(%esp)
 ; NORMAL-NEXT: movl $7, 8(%esp)
 ; NORMAL-NEXT: movl $6, 4(%esp)
 ; NORMAL-NEXT: movl $5, (%esp)
 ; NORMAL-NEXT: calll _good
-; NORMAL-NEXT: movl $12, 8(%esp)
-; NORMAL-NEXT: movl $11, 4(%esp)
-; NORMAL-NEXT: movl $9, (%esp)
-; NORMAL-NEXT: movl $10, %eax
-; NORMAL-NEXT: calll _inreg
-; NORMAL-NEXT: addl $16, %esp
 define void @test12() optsize {
 entry:
-  call void @inreg(i32 1, i32 2, i32 3, i32 4)
+  %s = alloca %struct.s, align 4
+  call void @struct(%struct.s* %s, i32 2, i32 3, i32 4)
   call void @good(i32 5, i32 6, i32 7, i32 8)
-  call void @inreg(i32 9, i32 10, i32 11, i32 12)
+  call void @struct(%struct.s* %s, i32 10, i32 11, i32 12)
   ret void
 }
 
@@ -324,13 +337,12 @@ entry:
 ; NORMAL-NEXT: pushl $1
 ; NORMAL-NEXT: calll _good
 ; NORMAL-NEXT: addl $16, %esp
-; NORMAL-NEXT: subl $12, %esp
-; NORMAL-NEXT: movl $8, 8(%esp)
-; NORMAL-NEXT: movl $7, 4(%esp)
-; NORMAL-NEXT: movl $5, (%esp)
-; NORMAL-NEXT: movl $6, %eax
-; NORMAL-NEXT: calll _inreg
-; NORMAL-NEXT: addl $12, %esp
+; NORMAL-NEXT: subl $20, %esp
+; NORMAL: movl $8, 16(%esp)
+; NORMAL-NEXT: movl $7, 12(%esp)
+; NORMAL-NEXT: movl $6, 8(%esp)
+; NORMAL-NEXT: calll _struct
+; NORMAL-NEXT: addl $20, %esp
 ; NORMAL-NEXT: pushl $12
 ; NORMAL-NEXT: pushl $11
 ; NORMAL-NEXT: pushl $10
@@ -339,8 +351,9 @@ entry:
 ; NORMAL-NEXT: addl $16, %esp
 define void @test12b() optsize {
 entry:
-  call void @good(i32 1, i32 2, i32 3, i32 4)
-  call void @inreg(i32 5, i32 6, i32 7, i32 8)
+  %s = alloca %struct.s, align 4
+  call void @good(i32 1, i32 2, i32 3, i32 4)
+  call void @struct(%struct.s* %s, i32 6, i32 7, i32 8)
   call void @good(i32 9, i32 10, i32 11, i32 12)
   ret void
 }
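
For orientation (not part of the change above): movtopush.ll exercises the X86 call-frame optimization that turns esp-relative movl stores of outgoing arguments into pushl instructions. A minimal sketch of the pattern the NORMAL checks above look for, reusing the file's @good declaration and check prefix; the function name @sketch is made up for illustration and is not in the test file:

; NORMAL-LABEL: sketch:
; NORMAL: pushl $4
; NORMAL-NEXT: pushl $3
; NORMAL-NEXT: pushl $2
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: call
; NORMAL-NEXT: addl $16, %esp
define void @sketch() optsize {
entry:
  ; Four i32 arguments, 16 bytes of outgoing stack, cleaned up by the caller.
  call void @good(i32 1, i32 2, i32 3, i32 4)
  ret void
}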