Diffstat (limited to 'test/CodeGen/X86/sse-schedule.ll')
-rw-r--r--	test/CodeGen/X86/sse-schedule.ll	327
1 files changed, 326 insertions, 1 deletions
diff --git a/test/CodeGen/X86/sse-schedule.ll b/test/CodeGen/X86/sse-schedule.ll
index c41acd43b3ab6..29f726c3df6a7 100644
--- a/test/CodeGen/X86/sse-schedule.ll
+++ b/test/CodeGen/X86/sse-schedule.ll
@@ -7,7 +7,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=haswell | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=skylake | FileCheck %s --check-prefix=CHECK --check-prefix=HASWELL
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=btver2 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=BTVER2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -print-schedule -mcpu=znver1 | FileCheck %s --check-prefix=CHECK --check-prefix=ZNVER1
 
 define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; GENERIC-LABEL: test_addps:
@@ -45,6 +45,12 @@ define <4 x float> @test_addps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fadd <4 x float> %a0, %a1
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = fadd <4 x float> %1, %2
@@ -87,6 +93,12 @@ define float @test_addss(float %a0, float %a1, float *%a2) {
 ; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_addss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vaddss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fadd float %a0, %a1
 %2 = load float, float *%a2, align 4
 %3 = fadd float %1, %2
@@ -137,6 +149,12 @@ define <4 x float> @test_andps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vandps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = bitcast <4 x float> %a0 to <4 x i32>
 %2 = bitcast <4 x float> %a1 to <4 x i32>
 %3 = and <4 x i32> %1, %2
@@ -191,6 +209,12 @@ define <4 x float> @test_andnotps(<4 x float> %a0, <4 x float> %a1, <4 x float>
 ; BTVER2-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_andnotps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vandnps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vandnps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = bitcast <4 x float> %a0 to <4 x i32>
 %2 = bitcast <4 x float> %a1 to <4 x i32>
 %3 = xor <4 x i32> %1, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -245,6 +269,13 @@ define <4 x float> @test_cmpps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cmpps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcmpeqps %xmm1, %xmm0, %xmm1 # sched: [3:1.00]
+; ZNVER1-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: vorps %xmm0, %xmm1, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fcmp oeq <4 x float> %a0, %a1
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = fcmp oeq <4 x float> %a0, %2
@@ -290,6 +321,12 @@ define float @test_cmpss(float %a0, float %a1, float *%a2) {
 ; BTVER2-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cmpss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vcmpeqss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = insertelement <4 x float> undef, float %a0, i32 0
 %2 = insertelement <4 x float> undef, float %a1, i32 0
 %3 = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %1, <4 x float> %2, i8 0)
@@ -385,6 +422,20 @@ define i32 @test_comiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50]
 ; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_comiss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcomiss %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
+; ZNVER1-NEXT: vcomiss (%rdi), %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %dl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %a1)
 %2 = load <4 x float>, <4 x float> *%a2, align 4
 %3 = call i32 @llvm.x86.sse.comieq.ss(<4 x float> %a0, <4 x float> %2)
@@ -435,6 +486,13 @@ define float @test_cvtsi2ss(i32 %a0, i32 *%a1) {
 ; BTVER2-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
 ; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsi2ss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = sitofp i32 %a0 to float
 %2 = load i32, i32 *%a1, align 4
 %3 = sitofp i32 %2 to float
@@ -484,6 +542,13 @@ define float @test_cvtsi2ssq(i64 %a0, i64 *%a1) {
 ; BTVER2-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [8:1.00]
 ; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtsi2ssq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [12:1.00]
+; ZNVER1-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = sitofp i64 %a0 to float
 %2 = load i64, i64 *%a1, align 8
 %3 = sitofp i64 %2 to float
@@ -533,6 +598,13 @@ define i32 @test_cvtss2si(float %a0, float *%a1) {
 ; BTVER2-NEXT: vcvtss2si %xmm0, %ecx # sched: [3:1.00]
 ; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtss2si:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtss2si (%rdi), %eax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtss2si %xmm0, %ecx # sched: [5:1.00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = insertelement <4 x float> undef, float %a0, i32 0
 %2 = call i32 @llvm.x86.sse.cvtss2si(<4 x float> %1)
 %3 = load float, float *%a1, align 4
@@ -585,6 +657,13 @@ define i64 @test_cvtss2siq(float %a0, float *%a1) {
 ; BTVER2-NEXT: vcvtss2si %xmm0, %rcx # sched: [3:1.00]
 ; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvtss2siq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvtss2si (%rdi), %rax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvtss2si %xmm0, %rcx # sched: [5:1.00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = insertelement <4 x float> undef, float %a0, i32 0
 %2 = call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %1)
 %3 = load float, float *%a1, align 4
@@ -637,6 +716,13 @@ define i32 @test_cvttss2si(float %a0, float *%a1) {
 ; BTVER2-NEXT: vcvttss2si %xmm0, %ecx # sched: [3:1.00]
 ; BTVER2-NEXT: addl %ecx, %eax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttss2si:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttss2si (%rdi), %eax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttss2si %xmm0, %ecx # sched: [5:1.00]
+; ZNVER1-NEXT: addl %ecx, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fptosi float %a0 to i32
 %2 = load float, float *%a1, align 4
 %3 = fptosi float %2 to i32
@@ -686,6 +772,13 @@ define i64 @test_cvttss2siq(float %a0, float *%a1) {
 ; BTVER2-NEXT: vcvttss2si %xmm0, %rcx # sched: [3:1.00]
 ; BTVER2-NEXT: addq %rcx, %rax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_cvttss2siq:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vcvttss2si (%rdi), %rax # sched: [12:1.00]
+; ZNVER1-NEXT: vcvttss2si %xmm0, %rcx # sched: [5:1.00]
+; ZNVER1-NEXT: addq %rcx, %rax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fptosi float %a0 to i64
 %2 = load float, float *%a1, align 4
 %3 = fptosi float %2 to i64
@@ -729,6 +822,12 @@ define <4 x float> @test_divps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
 ; BTVER2-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_divps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdivps %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivps (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fdiv <4 x float> %a0, %a1
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = fdiv <4 x float> %1, %2
@@ -771,6 +870,12 @@ define float @test_divss(float %a0, float %a1, float *%a2) {
 ; BTVER2-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [19:19.00]
 ; BTVER2-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [24:19.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_divss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vdivss %xmm1, %xmm0, %xmm0 # sched: [15:1.00]
+; ZNVER1-NEXT: vdivss (%rdi), %xmm0, %xmm0 # sched: [22:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fdiv float %a0, %a1
 %2 = load float, float *%a2, align 4
 %3 = fdiv float %1, %2
@@ -813,6 +918,12 @@ define void @test_ldmxcsr(i32 %a0) {
 ; BTVER2-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; BTVER2-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [5:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ldmxcsr:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: movl %edi, -{{[0-9]+}}(%rsp) # sched: [1:0.50]
+; ZNVER1-NEXT: vldmxcsr -{{[0-9]+}}(%rsp) # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = alloca i32, align 4
 %2 = bitcast i32* %1 to i8*
 store i32 %a0, i32* %1
@@ -857,6 +968,12 @@ define <4 x float> @test_maxps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_maxps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmaxps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmaxps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1)
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %1, <4 x float> %2)
@@ -900,6 +1017,12 @@ define <4 x float> @test_maxss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_maxss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmaxss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmaxss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1)
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %1, <4 x float> %2)
@@ -943,6 +1066,12 @@ define <4 x float> @test_minps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_minps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vminps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vminps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1)
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %1, <4 x float> %2)
@@ -986,6 +1115,12 @@ define <4 x float> @test_minss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_minss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vminss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vminss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1)
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %1, <4 x float> %2)
@@ -1035,6 +1170,13 @@ define void @test_movaps(<4 x float> *%a0, <4 x float> *%a1) {
 ; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movaps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovaps (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovaps %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = load <4 x float>, <4 x float> *%a0, align 16
 %2 = fadd <4 x float> %1, %1
 store <4 x float> %2, <4 x float> *%a1, align 16
@@ -1079,6 +1221,11 @@ define <4 x float> @test_movhlps(<4 x float> %a0, <4 x float> %a1) {
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movhlps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 6, i32 7, i32 2, i32 3>
 ret <4 x float> %1
 }
@@ -1129,6 +1276,13 @@ define void @test_movhps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movhps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0] sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vpextrq $1, %xmm0, (%rdi) # sched: [8:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = bitcast x86_mmx* %a2 to <2 x float>*
 %2 = load <2 x float>, <2 x float> *%1, align 8
 %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1177,6 +1331,12 @@ define <4 x float> @test_movlhps(<4 x float> %a0, <4 x float> %a1) {
 ; BTVER2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
 ; BTVER2-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movlhps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] sched: [1:0.50]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
 %2 = fadd <4 x float> %a1, %1
 ret <4 x float> %2
@@ -1224,6 +1384,13 @@ define void @test_movlps(<4 x float> %a0, <4 x float> %a1, x86_mmx *%a2) {
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movlps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovlpd {{.*#+}} xmm1 = mem[0],xmm1[1] sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovlps %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = bitcast x86_mmx* %a2 to <2 x float>*
 %2 = load <2 x float>, <2 x float> *%1, align 8
 %3 = shufflevector <2 x float> %2, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1266,6 +1433,11 @@ define i32 @test_movmskps(<4 x float> %a0) {
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movmskps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovmskps %xmm0, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
 ret i32 %1
 }
@@ -1307,6 +1479,11 @@ define void @test_movntps(<4 x float> %a0, <4 x float> *%a1) {
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movntps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovntps %xmm0, (%rdi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 store <4 x float> %a0, <4 x float> *%a1, align 16, !nontemporal !0
 ret void
 }
@@ -1353,6 +1530,13 @@ define void @test_movss_mem(float* %a0, float* %a1) {
 ; BTVER2-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vmovss %xmm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movss_mem:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vaddss %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovss %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = load float, float* %a0, align 1
 %2 = fadd float %1, %1
 store float %2, float *%a1, align 1
@@ -1395,6 +1579,11 @@ define <4 x float> @test_movss_reg(<4 x float> %a0, <4 x float> %a1) {
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movss_reg:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
 ret <4 x float> %1
 }
@@ -1441,6 +1630,13 @@ define void @test_movups(<4 x float> *%a0, <4 x float> *%a1) {
 ; BTVER2-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vmovups %xmm0, (%rsi) # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_movups:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovups (%rdi), %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: vaddps %xmm0, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vmovups %xmm0, (%rsi) # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = load <4 x float>, <4 x float> *%a0, align 1
 %2 = fadd <4 x float> %1, %1
 store <4 x float> %2, <4 x float> *%a1, align 1
@@ -1483,6 +1679,12 @@ define <4 x float> @test_mulps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mulps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmulps %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulps (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fmul <4 x float> %a0, %a1
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = fmul <4 x float> %1, %2
@@ -1525,6 +1727,12 @@ define float @test_mulss(float %a0, float %a1, float *%a2) {
 ; BTVER2-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [7:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_mulss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmulss %xmm1, %xmm0, %xmm0 # sched: [5:1.00]
+; ZNVER1-NEXT: vmulss (%rdi), %xmm0, %xmm0 # sched: [12:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fmul float %a0, %a1
 %2 = load float, float *%a2, align 4
 %3 = fmul float %1, %2
@@ -1575,6 +1783,12 @@ define <4 x float> @test_orps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2
 ; BTVER2-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_orps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = bitcast <4 x float> %a0 to <4 x i32>
 %2 = bitcast <4 x float> %a1 to <4 x i32>
 %3 = or <4 x i32> %1, %2
@@ -1621,6 +1835,11 @@ define void @test_prefetchnta(i8* %a0) {
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: prefetchnta (%rdi) # sched: [5:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_prefetchnta:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: prefetchnta (%rdi) # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 call void @llvm.prefetch(i8* %a0, i32 0, i32 0, i32 1)
 ret void
 }
@@ -1670,6 +1889,13 @@ define <4 x float> @test_rcpps(<4 x float> %a0, <4 x float> *%a1) {
 ; BTVER2-NEXT: vrcpps %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rcpps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vrcpps (%rdi), %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vrcpps %xmm0, %xmm0 # sched: [5:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0)
 %2 = load <4 x float>, <4 x float> *%a1, align 16
 %3 = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %2)
@@ -1728,6 +1954,14 @@ define <4 x float> @test_rcpss(float %a0, float *%a1) {
 ; BTVER2-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rcpss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vrcpss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT: vrcpss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = insertelement <4 x float> undef, float %a0, i32 0
 %2 = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %1)
 %3 = load float, float *%a1, align 4
@@ -1782,6 +2016,13 @@ define <4 x float> @test_rsqrtps(<4 x float> %a0, <4 x float> *%a1) {
 ; BTVER2-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [2:1.00]
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rsqrtps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vrsqrtps (%rdi), %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vrsqrtps %xmm0, %xmm0 # sched: [5:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0)
 %2 = load <4 x float>, <4 x float> *%a1, align 16
 %3 = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %2)
@@ -1840,6 +2081,14 @@ define <4 x float> @test_rsqrtss(float %a0, float *%a1) {
 ; BTVER2-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [7:1.00]
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_rsqrtss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero sched: [8:0.50]
+; ZNVER1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm0 # sched: [12:0.50]
+; ZNVER1-NEXT: vrsqrtss %xmm1, %xmm1, %xmm1 # sched: [12:0.50]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = insertelement <4 x float> undef, float %a0, i32 0
 %2 = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %1)
 %3 = load float, float *%a1, align 4
@@ -1886,6 +2135,11 @@ define void @test_sfence() {
 ; BTVER2: # BB#0:
 ; BTVER2-NEXT: sfence # sched: [1:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sfence:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: sfence # sched: [1:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 call void @llvm.x86.sse.sfence()
 ret void
 }
@@ -1931,6 +2185,12 @@ define <4 x float> @test_shufps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%
 ; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
 ; BTVER2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_shufps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[0,0] sched: [1:0.50]
+; ZNVER1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],mem[0,0] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 0, i32 4, i32 4>
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 3, i32 4, i32 4>
@@ -1980,6 +2240,13 @@ define <4 x float> @test_sqrtps(<4 x float> %a0, <4 x float> *%a1) {
 ; BTVER2-NEXT: vsqrtps %xmm0, %xmm0 # sched: [21:21.00]
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sqrtps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsqrtps (%rdi), %xmm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtps %xmm0, %xmm0 # sched: [20:1.00]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %a0)
 %2 = load <4 x float>, <4 x float> *%a1, align 16
 %3 = call <4 x float> @llvm.x86.sse.sqrt.ps(<4 x float> %2)
@@ -2038,6 +2305,14 @@ define <4 x float> @test_sqrtss(<4 x float> %a0, <4 x float> *%a1) {
 ; BTVER2-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [26:21.00]
 ; BTVER2-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_sqrtss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vmovaps (%rdi), %xmm1 # sched: [8:0.50]
+; ZNVER1-NEXT: vsqrtss %xmm0, %xmm0, %xmm0 # sched: [27:1.00]
+; ZNVER1-NEXT: vsqrtss %xmm1, %xmm1, %xmm1 # sched: [27:1.00]
+; ZNVER1-NEXT: vaddps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %a0)
 %2 = load <4 x float>, <4 x float> *%a1, align 16
 %3 = call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %2)
@@ -2082,6 +2357,12 @@ define i32 @test_stmxcsr() {
 ; BTVER2-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:1.00]
 ; BTVER2-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [5:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_stmxcsr:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vstmxcsr -{{[0-9]+}}(%rsp) # sched: [1:0.50]
+; ZNVER1-NEXT: movl -{{[0-9]+}}(%rsp), %eax # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = alloca i32, align 4
 %2 = bitcast i32* %1 to i8*
 call void @llvm.x86.sse.stmxcsr(i8* %2)
@@ -2126,6 +2407,12 @@ define <4 x float> @test_subps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_subps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsubps %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubps (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fsub <4 x float> %a0, %a1
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = fsub <4 x float> %1, %2
@@ -2168,6 +2455,12 @@ define float @test_subss(float %a0, float %a1, float *%a2) {
 ; BTVER2-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
 ; BTVER2-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [8:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_subss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vsubss %xmm1, %xmm0, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: vsubss (%rdi), %xmm0, %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = fsub float %a0, %a1
 %2 = load float, float *%a2, align 4
 %3 = fsub float %1, %2
@@ -2258,6 +2551,20 @@ define i32 @test_ucomiss(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a2) {
 ; BTVER2-NEXT: orb %cl, %dl # sched: [1:0.50]
 ; BTVER2-NEXT: movzbl %dl, %eax # sched: [1:0.50]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_ucomiss:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vucomiss %xmm1, %xmm0 # sched: [3:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %cl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %cl # sched: [1:0.25]
+; ZNVER1-NEXT: vucomiss (%rdi), %xmm0 # sched: [10:1.00]
+; ZNVER1-NEXT: setnp %al # sched: [1:0.25]
+; ZNVER1-NEXT: sete %dl # sched: [1:0.25]
+; ZNVER1-NEXT: andb %al, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: orb %cl, %dl # sched: [1:0.25]
+; ZNVER1-NEXT: movzbl %dl, %eax # sched: [1:0.25]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %a1)
 %2 = load <4 x float>, <4 x float> *%a2, align 4
 %3 = call i32 @llvm.x86.sse.ucomieq.ss(<4 x float> %a0, <4 x float> %2)
@@ -2306,6 +2613,12 @@ define <4 x float> @test_unpckhps(<4 x float> %a0, <4 x float> %a1, <4 x float>
 ; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
 ; BTVER2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_unpckhps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] sched: [1:0.50]
+; ZNVER1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
@@ -2352,6 +2665,12 @@ define <4 x float> @test_unpcklps(<4 x float> %a0, <4 x float> %a1, <4 x float>
 ; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
 ; BTVER2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_unpcklps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] sched: [1:0.50]
+; ZNVER1-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
 %2 = load <4 x float>, <4 x float> *%a2, align 16
 %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
@@ -2402,6 +2721,12 @@ define <4 x float> @test_xorps(<4 x float> %a0, <4 x float> %a1, <4 x float> *%a
 ; BTVER2-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.50]
 ; BTVER2-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [6:1.00]
 ; BTVER2-NEXT: retq # sched: [4:1.00]
+;
+; ZNVER1-LABEL: test_xorps:
+; ZNVER1: # BB#0:
+; ZNVER1-NEXT: vxorps %xmm1, %xmm0, %xmm0 # sched: [1:0.25]
+; ZNVER1-NEXT: vxorps (%rdi), %xmm0, %xmm0 # sched: [8:0.50]
+; ZNVER1-NEXT: retq # sched: [5:0.50]
 %1 = bitcast <4 x float> %a0 to <4 x i32>
 %2 = bitcast <4 x float> %a1 to <4 x i32>
 %3 = xor <4 x i32> %1, %2