author    | Dimitry Andric <dim@FreeBSD.org> | 2017-08-20 21:02:43 +0000
committer | Dimitry Andric <dim@FreeBSD.org> | 2017-08-20 21:02:43 +0000
commit    | 15c5c77fa04cd97e1057e8a585f669fc49da0d92 (patch)
tree      | 9047e00a30ccb7b81dbe7227c8c883cbafb5d2dd /test
parent    | 4e20bb0468b8d0db13287e666b482eb93689be99 (diff)
download  | src-15c5c77fa04cd97e1057e8a585f669fc49da0d92.tar.gz, src-15c5c77fa04cd97e1057e8a585f669fc49da0d92.zip
Diffstat (limited to 'test')
22 files changed, 824 insertions, 52 deletions
diff --git a/test/Analysis/ScalarEvolution/max-addrec-size.ll b/test/Analysis/ScalarEvolution/max-addrec-size.ll
new file mode 100644
index 000000000000..aad0ddda37bc
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/max-addrec-size.ll
@@ -0,0 +1,33 @@
+; RUN: opt -analyze -scalar-evolution -scalar-evolution-max-add-rec-size=3 < %s | FileCheck %s
+
+; Show that we are able to avoid creation of huge SCEVs by capping the max
+; AddRec size.
+define i32 @test_01(i32 %a, i32 %b) {
+
+; CHECK-LABEL: Classifying expressions for: @test_01
+; CHECK-NEXT: %iv = phi i32 [ %a, %entry ], [ %iv.next, %loop ]
+; CHECK-NEXT: --> {%a,+,%b}<%loop> U: full-set S: full-set
+; CHECK-NEXT: %iv.next = add i32 %iv, %b
+; CHECK-NEXT: --> {(%a + %b),+,%b}<%loop> U: full-set S: full-set
+; CHECK-NEXT: %x1 = mul i32 %iv, %iv.next
+; CHECK-NEXT: --> {((%a + %b) * %a),+,(((2 * %a) + (2 * %b)) * %b),+,(2 * %b * %b)}<%loop> U: full-set S: full-set
+; CHECK-NEXT: %x2 = mul i32 %x1, %x1
+; CHECK-NEXT: --> ({((%a + %b) * %a),+,(((2 * %a) + (2 * %b)) * %b),+,(2 * %b * %b)}<%loop> * {((%a + %b) * %a),+,(((2 * %a) + (2 * %b)) * %b),+,(2 * %b * %b)}<%loop>) U: full-set S: full-set
+; CHECK-NEXT: %x3 = mul i32 %x2, %x1
+; CHECK-NEXT: --> ({((%a + %b) * %a),+,(((2 * %a) + (2 * %b)) * %b),+,(2 * %b * %b)}<%loop> * {((%a + %b) * %a),+,(((2 * %a) + (2 * %b)) * %b),+,(2 * %b * %b)}<%loop> * {((%a + %b) * %a),+,(((2 * %a) + (2 * %b)) * %b),+,(2 * %b * %b)}<%loop>) U: full-set S: full-set
+
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ %a, %entry ], [ %iv.next, %loop ]
+  %iv.next = add i32 %iv, %b
+  %cond = icmp slt i32 %iv.next, 1000
+  br i1 %cond, label %loop, label %exit
+
+exit:
+  %x1 = mul i32 %iv, %iv.next
+  %x2 = mul i32 %x1, %x1
+  %x3 = mul i32 %x2, %x1
+  ret i32 %x3
+}
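An aside for readers unfamiliar with the SCEV notation in the CHECK lines above: an add recurrence `{A,+,B}<%loop>` evaluates to A + B·i on iteration i of `%loop`, and a longer chain `{c0,+,c1,+,...,+,cn}` evaluates through binomial coefficients. Multiplying two chains of lengths m and n yields a chain of length m + n − 1, so products of recurrences grow quickly; that blow-up is exactly what the `-scalar-evolution-max-add-rec-size=3` cap in the test prevents. A sketch of the arithmetic (not part of the commit):

```latex
% Value of an add recurrence at iteration i, and its growth under multiplication:
\[
\{c_0,+,c_1,+,\dots,+,c_n\}(i) \;=\; \sum_{k=0}^{n} c_k \binom{i}{k},
\qquad
\mathrm{len}(X \cdot Y) \;=\; \mathrm{len}(X) + \mathrm{len}(Y) - 1 .
\]
% In @test_01: %x1 = %iv * %iv.next has length 2 + 2 - 1 = 3, exactly at the cap,
% while %x2 = %x1 * %x1 would need length 3 + 3 - 1 = 5 > 3, so SCEV keeps it as an
% opaque product -- which is what the ({...} * {...}) CHECK lines verify.
```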
diff --git a/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir b/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir
new file mode 100644
index 000000000000..dacaf4966d07
--- /dev/null
+++ b/test/CodeGen/AArch64/arm64-ldst-unscaled-pre-post.mir
@@ -0,0 +1,115 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass aarch64-ldst-opt -verify-machineinstrs -o - %s | FileCheck %s
+---
+# CHECK-LABEL: name: test_LDURSi_post
+# CHECK: LDRSpost %x0, -4
+name: test_LDURSi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %s0 = LDURSi %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_LDURDi_post
+# CHECK: LDRDpost %x0, -4
+name: test_LDURDi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %d0 = LDURDi %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_LDURQi_post
+# CHECK: LDRQpost %x0, -4
+name: test_LDURQi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %q0 = LDURQi %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_LDURWi_post
+# CHECK: LDRWpost %x0, -4
+name: test_LDURWi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %w1 = LDURWi %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_LDURXi_post
+# CHECK: %x1 = LDRXpost %x0, -4
+name: test_LDURXi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %x1 = LDURXi %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_STURSi_post
+# CHECK: STRSpost %s0, %x0, -4
+name: test_STURSi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %s0 = FMOVS0
+    STURSi %s0, %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_STURDi_post
+# CHECK: STRDpost %d0, %x0, -4
+name: test_STURDi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %d0 = FMOVD0
+    STURDi %d0, %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_STURQi_post
+# CHECK: STRQpost %q0, %x0, -4
+name: test_STURQi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    %q0 = MOVIv4i32 0, 0
+    STURQi %q0, %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_STURWi_post
+# CHECK: STRWpost %wzr, %x0, -4
+name: test_STURWi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    STURWi %wzr, %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
+# CHECK-LABEL: name: test_STURXi_post
+# CHECK: STRXpost %xzr, %x0, -4
+name: test_STURXi_post
+body: |
+  bb.0.entry:
+    liveins: %x0
+
+    STURXi %xzr, %x0, 0
+    %x0 = SUBXri %x0, 4, 0
+    RET_ReallyLR implicit %x0
+...
diff --git a/test/CodeGen/ARM/cmpxchg-O0.ll b/test/CodeGen/ARM/cmpxchg-O0.ll
index a3be72112c76..f8ad2bbbbe0e 100644
--- a/test/CodeGen/ARM/cmpxchg-O0.ll
+++ b/test/CodeGen/ARM/cmpxchg-O0.ll
@@ -10,11 +10,10 @@ define { i8, i1 } @test_cmpxchg_8(i8* %addr, i8 %desired, i8 %new) nounwind {
 ; CHECK: dmb ish
 ; CHECK: uxtb [[DESIRED:r[0-9]+]], [[DESIRED]]
 ; CHECK: [[RETRY:.LBB[0-9]+_[0-9]+]]:
-; CHECK: mov{{s?}} [[STATUS:r[0-9]+]], #0
 ; CHECK: ldrexb [[OLD:r[0-9]+]], [r0]
 ; CHECK: cmp [[OLD]], [[DESIRED]]
 ; CHECK: bne [[DONE:.LBB[0-9]+_[0-9]+]]
-; CHECK: strexb [[STATUS]], r2, [r0]
+; CHECK: strexb [[STATUS:r[0-9]+]], r2, [r0]
 ; CHECK: cmp{{(\.w)?}} [[STATUS]], #0
 ; CHECK: bne [[RETRY]]
 ; CHECK: [[DONE]]:
@@ -30,11 +29,10 @@ define { i16, i1 } @test_cmpxchg_16(i16* %addr, i16 %desired, i16 %new) nounwind
 ; CHECK: dmb ish
 ; CHECK: uxth [[DESIRED:r[0-9]+]], [[DESIRED]]
 ; CHECK: [[RETRY:.LBB[0-9]+_[0-9]+]]:
-; CHECK: mov{{s?}} [[STATUS:r[0-9]+]], #0
 ; CHECK: ldrexh [[OLD:r[0-9]+]], [r0]
 ; CHECK: cmp [[OLD]], [[DESIRED]]
 ; CHECK: bne [[DONE:.LBB[0-9]+_[0-9]+]]
-; CHECK: strexh [[STATUS]], r2, [r0]
+; CHECK: strexh [[STATUS:r[0-9]+]], r2, [r0]
 ; CHECK: cmp{{(\.w)?}} [[STATUS]], #0
 ; CHECK: bne [[RETRY]]
 ; CHECK: [[DONE]]:
@@ -50,11 +48,10 @@ define { i32, i1 } @test_cmpxchg_32(i32* %addr, i32 %desired, i32 %new) nounwind
 ; CHECK: dmb ish
 ; CHECK-NOT: uxt
 ; CHECK: [[RETRY:.LBB[0-9]+_[0-9]+]]:
-; CHECK: mov{{s?}} [[STATUS:r[0-9]+]], #0
 ; CHECK: ldrex [[OLD:r[0-9]+]], [r0]
 ; CHECK: cmp [[OLD]], [[DESIRED]]
 ; CHECK: bne [[DONE:.LBB[0-9]+_[0-9]+]]
-; CHECK: strex [[STATUS]], r2, [r0]
+; CHECK: strex [[STATUS:r[0-9]+]], r2, [r0]
 ; CHECK: cmp{{(\.w)?}} [[STATUS]], #0
 ; CHECK: bne [[RETRY]]
 ; CHECK: [[DONE]]:
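An aside on reading the cmpxchg-O0.ll hunks above: in FileCheck, `[[NAME:regex]]` both matches the regex and binds the captured text to `NAME`, while a bare `[[NAME]]` requires the previously bound text to appear again. The deleted `mov{{s?}} [[STATUS:r[0-9]+]], #0` line was what defined `STATUS`, so once it is gone the binding has to move to the first surviving line that mentions that register — the `strexb`/`strexh`/`strex` line. A minimal illustration (hypothetical test, not from this commit):

```llvm
; CHECK: strexb [[STATUS:r[0-9]+]], r2, [r0]   ; binds STATUS to whichever r<N> matched here
; CHECK: cmp [[STATUS]], #0                    ; must then see the same r<N> again
```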
diff --git a/test/CodeGen/ARM/virtregrewriter-subregliveness.mir b/test/CodeGen/ARM/virtregrewriter-subregliveness.mir
new file mode 100644
index 000000000000..83335a3ccffd
--- /dev/null
+++ b/test/CodeGen/ARM/virtregrewriter-subregliveness.mir
@@ -0,0 +1,84 @@
+# RUN: llc -o - -mtriple=thumbv7--windows-gnu -run-pass=greedy -run-pass=virtregrewriter %s | FileCheck %s
+--- |
+  target datalayout = "e-m:w-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+  target triple = "thumbv7--windows-gnu"
+
+  define void @subregLiveThrough() { ret void }
+  define void @subregNotLiveThrough() { ret void }
+  define void @subregNotLiveThrough2() { ret void }
+
+...
+---
+# Check that we properly recognize that r1 is live through
+# the first subreg copy.
+# That will materialize as an implicit use of the big register
+# on that copy.
+# PR34107.
+#
+# CHECK-LABEL: name: subregLiveThrough
+name: subregLiveThrough
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gprpair }
+body: |
+  bb.0:
+    liveins: %r0, %r1
+
+    ; That copy is being coalesced so we should use a KILL
+    ; placeholder. If that's not a kill that means we probably
+    ; not coalescing %0 and %r0_r1 and thus we are not testing
+    ; the problematic code anymore.
+    ;
+    ; CHECK: %r0 = KILL %r0, implicit killed %r0_r1, implicit-def %r0_r1
+    ; CHECK-NEXT: %r1 = KILL %r1, implicit killed %r0_r1
+    undef %0.gsub_0 = COPY %r0
+    %0.gsub_1 = COPY %r1
+    tBX_RET 14, _, implicit %0
+
+
+...
+
+---
+# Check that we properly recognize that r1 is *not* live through
+# the first subreg copy.
+# CHECK-LABEL: name: subregNotLiveThrough
+name: subregNotLiveThrough
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gprpair }
+body: |
+  bb.0:
+    liveins: %r0, %r1
+
+    ; r1 is not live through so check we are not implicitly using
+    ; the big register.
+    ; CHECK: %r0 = KILL %r0, implicit-def %r0_r1
+    ; CHECK-NEXT: tBX_RET
+    undef %0.gsub_0 = COPY %r0
+    tBX_RET 14, _, implicit %0
+
+
+...
+
+---
+# Check that we properly recognize that r1 is *not* live through
+# the first subreg copy. It is defined by this copy, but is not
+# through.
+# CHECK-LABEL: name: subregNotLiveThrough2
+name: subregNotLiveThrough2
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gprpair }
+body: |
+  bb.0:
+    liveins: %r0, %r1
+
+    ; r1 is not live through so check we are not implicitly using
+    ; the big register.
+    ; CHECK: %r0 = KILL %r0, implicit-def %r1, implicit-def %r0_r1
+    ; CHECK-NEXT: tBX_RET
+    undef %0.gsub_0 = COPY %r0, implicit-def %r1
+    tBX_RET 14, _, implicit %0
+
+
+...
diff --git a/test/CodeGen/X86/adx-intrinsics.ll b/test/CodeGen/X86/adx-intrinsics.ll
index 0498177a9c12..819a5df14e63 100644
--- a/test/CodeGen/X86/adx-intrinsics.ll
+++ b/test/CodeGen/X86/adx-intrinsics.ll
@@ -75,3 +75,30 @@ define i8 @test_subborrow_u64(i8 %c, i64 %a, i64 %b, i8* %ptr) {
   ret i8 %ret;
 }
 
+; Try a version with loads. Previously we crashed on this.
+define i32 @load_crash(i64* nocapture readonly %a, i64* nocapture readonly %b, i64* %res) {
+; CHECK-LABEL: load_crash
+; CHECK: addb
+; ADX: adcxq
+; CHECK: setb
+; CHECK: retq
+  %1 = load i64, i64* %a, align 8
+  %2 = load i64, i64* %b, align 8
+  %3 = bitcast i64* %res to i8*
+  %4 = tail call i8 @llvm.x86.addcarryx.u64(i8 0, i64 %1, i64 %2, i8* %3)
+  %conv = zext i8 %4 to i32
+  ret i32 %conv
+}
+
+; Try a really simple all zero input case, which also used to crash
+define void @allzeros() {
+; CHECK-LABEL: allzeros
+; CHECK: xorl
+; CHECK: addb
+; CHECK: sbbq
+; CHECK: andl
+; CHECK: retq
+entry:
+  %0 = tail call i8 @llvm.x86.addcarryx.u64(i8 0, i64 0, i64 0, i8* null)
+  ret void
+}
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index 5472f057ef27..4abe3df9fc2a 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -1921,9 +1921,9 @@ define <64 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_512(i8 %x0, <64 x i8>
 ; AVX512BW-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_512:
 ; AVX512BW: ## BB#0:
 ; AVX512BW-NEXT: kmovq %rsi, %k1
-; AVX512BW-NEXT: vpbroadcastb %dil, %zmm0 {%k1}
-; AVX512BW-NEXT: vpbroadcastb %dil, %zmm1 {%k1} {z}
-; AVX512BW-NEXT: vpbroadcastb %dil, %zmm2
+; AVX512BW-NEXT: vpbroadcastb %edi, %zmm1 {%k1} {z}
+; AVX512BW-NEXT: vpbroadcastb %edi, %zmm0 {%k1}
+; AVX512BW-NEXT: vpbroadcastb %edi, %zmm2
 ; AVX512BW-NEXT: vpaddb %zmm0, %zmm2, %zmm0
 ; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT: retq
@@ -1934,9 +1934,9 @@ define <64 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_512(i8 %x0, <64 x i8>
 ; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k0
 ; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
 ; AVX512F-32-NEXT: kunpckdq %k0, %k1, %k1
-; AVX512F-32-NEXT: vpbroadcastb %al, %zmm1 {%k1} {z}
-; AVX512F-32-NEXT: vpbroadcastb %al, %zmm0 {%k1}
-; AVX512F-32-NEXT: vpbroadcastb %al, %zmm2
+; AVX512F-32-NEXT: vpbroadcastb %eax, %zmm1 {%k1} {z}
+; AVX512F-32-NEXT: vpbroadcastb %eax, %zmm0 {%k1}
+; AVX512F-32-NEXT: vpbroadcastb %eax, %zmm2
 ; AVX512F-32-NEXT: vpaddb %zmm0, %zmm2, %zmm0
 ; AVX512F-32-NEXT: vpaddb %zmm0, %zmm1, %zmm0
 ; AVX512F-32-NEXT: retl
@@ -1954,20 +1954,20 @@ define <32 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_512(i16 %x0, <32 x i
 ; AVX512BW-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_512:
 ; AVX512BW: ## BB#0:
 ; AVX512BW-NEXT: kmovd %esi, %k1
-; AVX512BW-NEXT: vpbroadcastw %di, %zmm0 {%k1}
-; AVX512BW-NEXT: vpbroadcastw %di, %zmm1 {%k1} {z}
-; AVX512BW-NEXT: vpbroadcastw %di, %zmm2
+; AVX512BW-NEXT: vpbroadcastw %edi, %zmm1 {%k1} {z}
+; AVX512BW-NEXT: vpbroadcastw %edi, %zmm0 {%k1}
+; AVX512BW-NEXT: vpbroadcastw %edi, %zmm2
 ; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
 ; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT: retq
 ;
 ; AVX512F-32-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_512:
 ; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
 ; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; AVX512F-32-NEXT: vpbroadcastw %ax, %zmm0 {%k1}
-; AVX512F-32-NEXT: vpbroadcastw %ax, %zmm1 {%k1} {z}
-; AVX512F-32-NEXT: vpbroadcastw %ax, %zmm2
+; AVX512F-32-NEXT: movw {{[0-9]+}}(%esp), %ax
+; AVX512F-32-NEXT: vpbroadcastw %eax, %zmm1 {%k1} {z}
+; AVX512F-32-NEXT: vpbroadcastw %eax, %zmm0 {%k1}
+; AVX512F-32-NEXT: vpbroadcastw %eax, %zmm2
 ; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
 ; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
 ; AVX512F-32-NEXT: retl
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index c3ba6f106e6a..9ceb3e5931a6 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -2799,9 +2799,9 @@ define <32 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_256(i8 %x0, <32 x i8>
 ; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_256:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
-; CHECK-NEXT: vpbroadcastb %dil, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7a,0xc7]
-; CHECK-NEXT: vpbroadcastb %dil, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7a,0xcf]
-; CHECK-NEXT: vpbroadcastb %dil, %ymm2 ## encoding: [0x62,0xf2,0x7d,0x28,0x7a,0xd7]
+; CHECK-NEXT: vpbroadcastb %edi, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7a,0xcf]
+; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7a,0xc7]
+; CHECK-NEXT: vpbroadcastb %edi, %ymm2 ## encoding: [0x62,0xf2,0x7d,0x28,0x7a,0xd7]
 ; CHECK-NEXT: vpaddb %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc0]
 ; CHECK-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfc,0xc0]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2819,9 +2819,9 @@ define <16 x i8>@test_int_x86_avx512_mask_pbroadcast_b_gpr_128(i8 %x0, <16 x i8>
 ; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_b_gpr_128:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
-; CHECK-NEXT: vpbroadcastb %dil, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7a,0xcf]
-; CHECK-NEXT: vpbroadcastb %dil, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7a,0xc7]
-; CHECK-NEXT: vpbroadcastb %dil, %xmm2 ## encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xd7]
+; CHECK-NEXT: vpbroadcastb %edi, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7a,0xcf]
+; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7a,0xc7]
+; CHECK-NEXT: vpbroadcastb %edi, %xmm2 ## encoding: [0x62,0xf2,0x7d,0x08,0x7a,0xd7]
 ; CHECK-NEXT: vpaddb %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc0]
 ; CHECK-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfc,0xc0]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2839,9 +2839,9 @@ define <16 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_256(i16 %x0, <16 x i
 ; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_256:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
-; CHECK-NEXT: vpbroadcastw %di, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7b,0xcf]
-; CHECK-NEXT: vpbroadcastw %di, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7b,0xc7]
-; CHECK-NEXT: vpbroadcastw %di, %ymm2 ## encoding: [0x62,0xf2,0x7d,0x28,0x7b,0xd7]
+; CHECK-NEXT: vpbroadcastw %edi, %ymm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x7b,0xcf]
+; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x7b,0xc7]
+; CHECK-NEXT: vpbroadcastw %edi, %ymm2 ## encoding: [0x62,0xf2,0x7d,0x28,0x7b,0xd7]
 ; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
 ; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -2859,9 +2859,9 @@ define <8 x i16>@test_int_x86_avx512_mask_pbroadcast_w_gpr_128(i16 %x0, <8 x i16
 ; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcast_w_gpr_128:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
-; CHECK-NEXT: vpbroadcastw %di, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7b,0xcf]
-; CHECK-NEXT: vpbroadcastw %di, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7b,0xc7]
-; CHECK-NEXT: vpbroadcastw %di, %xmm2 ## encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xd7]
+; CHECK-NEXT: vpbroadcastw %edi, %xmm1 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x7b,0xcf]
+; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x7b,0xc7]
+; CHECK-NEXT: vpbroadcastw %edi, %xmm2 ## encoding: [0x62,0xf2,0x7d,0x08,0x7b,0xd7]
 ; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc0]
 ; CHECK-NEXT: vpaddw %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xc0]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
diff --git a/test/CodeGen/X86/pr33349.ll b/test/CodeGen/X86/pr33349.ll
new file mode 100644
index 000000000000..db866db22481
--- /dev/null
+++ b/test/CodeGen/X86/pr33349.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mattr=+avx512f | FileCheck %s --check-prefix=KNL
+; RUN: llc < %s -mattr=+avx512f,+avx512vl,+avx512bw,+avx512dq | FileCheck %s --check-prefix=SKX
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+ define void @test(<4 x i1> %m, <4 x x86_fp80> %v, <4 x x86_fp80>*%p) local_unnamed_addr {
+; KNL-LABEL: test:
+; KNL: # BB#0: # %bb
+; KNL-NEXT: vpextrb $0, %xmm0, %eax
+; KNL-NEXT: testb $1, %al
+; KNL-NEXT: fld1
+; KNL-NEXT: fldz
+; KNL-NEXT: fld %st(0)
+; KNL-NEXT: fcmovne %st(2), %st(0)
+; KNL-NEXT: vpextrb $4, %xmm0, %eax
+; KNL-NEXT: testb $1, %al
+; KNL-NEXT: fld %st(1)
+; KNL-NEXT: fcmovne %st(3), %st(0)
+; KNL-NEXT: vpextrb $8, %xmm0, %eax
+; KNL-NEXT: testb $1, %al
+; KNL-NEXT: fld %st(2)
+; KNL-NEXT: fcmovne %st(4), %st(0)
+; KNL-NEXT: vpextrb $12, %xmm0, %eax
+; KNL-NEXT: testb $1, %al
+; KNL-NEXT: fxch %st(3)
+; KNL-NEXT: fcmovne %st(4), %st(0)
+; KNL-NEXT: fstp %st(4)
+; KNL-NEXT: fxch %st(3)
+; KNL-NEXT: fstpt 30(%rdi)
+; KNL-NEXT: fxch %st(1)
+; KNL-NEXT: fstpt 20(%rdi)
+; KNL-NEXT: fxch %st(1)
+; KNL-NEXT: fstpt 10(%rdi)
+; KNL-NEXT: fstpt (%rdi)
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test:
+; SKX: # BB#0: # %bb
+; SKX-NEXT: vpslld $31, %xmm0, %xmm0
+; SKX-NEXT: vptestmd %xmm0, %xmm0, %k0
+; SKX-NEXT: kshiftrw $2, %k0, %k1
+; SKX-NEXT: kshiftlw $15, %k1, %k2
+; SKX-NEXT: kshiftrw $15, %k2, %k2
+; SKX-NEXT: kshiftlw $15, %k2, %k2
+; SKX-NEXT: kshiftrw $15, %k2, %k2
+; SKX-NEXT: kmovd %k2, %eax
+; SKX-NEXT: testb $1, %al
+; SKX-NEXT: fld1
+; SKX-NEXT: fldz
+; SKX-NEXT: fld %st(0)
+; SKX-NEXT: fcmovne %st(2), %st(0)
+; SKX-NEXT: kshiftlw $14, %k1, %k1
+; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kshiftlw $15, %k1, %k1
+; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kmovd %k1, %eax
+; SKX-NEXT: testb $1, %al
+; SKX-NEXT: fld %st(1)
+; SKX-NEXT: fcmovne %st(3), %st(0)
+; SKX-NEXT: kshiftlw $15, %k0, %k1
+; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kshiftlw $15, %k1, %k1
+; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kmovd %k1, %eax
+; SKX-NEXT: testb $1, %al
+; SKX-NEXT: fld %st(2)
+; SKX-NEXT: fcmovne %st(4), %st(0)
+; SKX-NEXT: kshiftlw $14, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kshiftlw $15, %k0, %k0
+; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: testb $1, %al
+; SKX-NEXT: fxch %st(3)
+; SKX-NEXT: fcmovne %st(4), %st(0)
+; SKX-NEXT: fstp %st(4)
+; SKX-NEXT: fxch %st(3)
+; SKX-NEXT: fstpt 10(%rdi)
+; SKX-NEXT: fxch %st(1)
+; SKX-NEXT: fstpt (%rdi)
+; SKX-NEXT: fxch %st(1)
+; SKX-NEXT: fstpt 30(%rdi)
+; SKX-NEXT: fstpt 20(%rdi)
+; SKX-NEXT: retq
+ bb:
+ %tmp = select <4 x i1> %m, <4 x x86_fp80> <x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000, x86_fp80 0xK3FFF8000000000000000>, <4 x x86_fp80> zeroinitializer
+ store <4 x x86_fp80> %tmp, <4 x x86_fp80>* %p, align 16
+ ret void
+ }
+
diff --git a/test/CodeGen/X86/pr34088.ll b/test/CodeGen/X86/pr34088.ll
new file mode 100644
index 000000000000..d3667e3884d4
--- /dev/null
+++ b/test/CodeGen/X86/pr34088.ll
@@ -0,0 +1,46 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mcpu=pentium4 | FileCheck %s
+
+%struct.Foo = type { i32, %struct.Bar }
+%struct.Bar = type { i32, %struct.Buffer, i32 }
+%struct.Buffer = type { i8*, i32 }
+
+; This test checks that the load of store %2 is not dropped.
+;
+define i32 @pr34088() local_unnamed_addr {
+; CHECK-LABEL: pr34088:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: .Lcfi0:
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_offset %ebp, -8
+; CHECK-NEXT: movl %esp, %ebp
+; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .cfi_def_cfa_register %ebp
+; CHECK-NEXT: andl $-16, %esp
+; CHECK-NEXT: subl $32, %esp
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: movaps {{.*#+}} xmm1 = [205,205,205,205,205,205,205,205,205,205,205,205,205,205,205,205]
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: movaps %xmm0, (%esp)
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movaps %xmm1, (%esp)
+; CHECK-NEXT: movl $-842150451, {{[0-9]+}}(%esp) # imm = 0xCDCDCDCD
+; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %ebp, %esp
+; CHECK-NEXT: popl %ebp
+; CHECK-NEXT: retl
+entry:
+  %foo = alloca %struct.Foo, align 4
+  %0 = bitcast %struct.Foo* %foo to i8*
+  call void @llvm.memset.p0i8.i32(i8* nonnull %0, i8 0, i32 20, i32 4, i1 false)
+  %buffer1 = getelementptr inbounds %struct.Foo, %struct.Foo* %foo, i32 0, i32 1, i32 1
+  %1 = bitcast %struct.Buffer* %buffer1 to i64*
+  %2 = load i64, i64* %1, align 4
+  call void @llvm.memset.p0i8.i32(i8* nonnull %0, i8 -51, i32 20, i32 4, i1 false)
+  store i64 %2, i64* %1, align 4
+  ret i32 0
+}
+
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i32, i1)
diff --git a/test/CodeGen/X86/select-mmx.ll b/test/CodeGen/X86/select-mmx.ll
new file mode 100644
index 000000000000..9e6382faaa59
--- /dev/null
+++ b/test/CodeGen/X86/select-mmx.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i686-unknown-unknown -mattr=+mmx < %s | FileCheck %s --check-prefix=I32
+
+
+; From source: clang -02
+;__m64 test47(int a)
+;{
+; __m64 x = (a)? (__m64)(7): (__m64)(0);
+; return __builtin_ia32_psllw(x, x);
+;}
+
+define i64 @test47(i64 %arg) {
+;
+; X64-LABEL: test47:
+; X64: # BB#0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: testq %rdi, %rdi
+; X64-NEXT: movl $7, %ecx
+; X64-NEXT: cmoveq %rcx, %rax
+; X64-NEXT: movd %rax, %mm0
+; X64-NEXT: psllw %mm0, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
+;
+; I32-LABEL: test47:
+; I32: # BB#0:
+; I32-NEXT: pushl %ebp
+; I32-NEXT: .Lcfi0:
+; I32-NEXT: .cfi_def_cfa_offset 8
+; I32-NEXT: .Lcfi1:
+; I32-NEXT: .cfi_offset %ebp, -8
+; I32-NEXT: movl %esp, %ebp
+; I32-NEXT: .Lcfi2:
+; I32-NEXT: .cfi_def_cfa_register %ebp
+; I32-NEXT: andl $-8, %esp
+; I32-NEXT: subl $16, %esp
+; I32-NEXT: movl 8(%ebp), %eax
+; I32-NEXT: orl 12(%ebp), %eax
+; I32-NEXT: movl $7, %eax
+; I32-NEXT: je .LBB0_2
+; I32-NEXT: # BB#1:
+; I32-NEXT: xorl %eax, %eax
+; I32-NEXT: .LBB0_2:
+; I32-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; I32-NEXT: movl $0, {{[0-9]+}}(%esp)
+; I32-NEXT: movq {{[0-9]+}}(%esp), %mm0
+; I32-NEXT: psllw %mm0, %mm0
+; I32-NEXT: movq %mm0, (%esp)
+; I32-NEXT: movl (%esp), %eax
+; I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; I32-NEXT: movl %ebp, %esp
+; I32-NEXT: popl %ebp
+; I32-NEXT: retl
+  %cond = icmp eq i64 %arg, 0
+  %slct = select i1 %cond, x86_mmx bitcast (i64 7 to x86_mmx), x86_mmx bitcast (i64 0 to x86_mmx)
+  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
+  %retc = bitcast x86_mmx %psll to i64
+  ret i64 %retc
+}
+
+
+; From source: clang -O2
+;__m64 test49(int a, long long n, long long m)
+;{
+; __m64 x = (a)? (__m64)(n): (__m64)(m);
+; return __builtin_ia32_psllw(x, x);
+;}
+
+define i64 @test49(i64 %arg, i64 %x, i64 %y) {
+;
+; X64-LABEL: test49:
+; X64: # BB#0:
+; X64-NEXT: testq %rdi, %rdi
+; X64-NEXT: cmovneq %rdx, %rsi
+; X64-NEXT: movd %rsi, %mm0
+; X64-NEXT: psllw %mm0, %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
+;
+; I32-LABEL: test49:
+; I32: # BB#0:
+; I32-NEXT: pushl %ebp
+; I32-NEXT: .Lcfi3:
+; I32-NEXT: .cfi_def_cfa_offset 8
+; I32-NEXT: .Lcfi4:
+; I32-NEXT: .cfi_offset %ebp, -8
+; I32-NEXT: movl %esp, %ebp
+; I32-NEXT: .Lcfi5:
+; I32-NEXT: .cfi_def_cfa_register %ebp
+; I32-NEXT: andl $-8, %esp
+; I32-NEXT: subl $8, %esp
+; I32-NEXT: movl 8(%ebp), %eax
+; I32-NEXT: orl 12(%ebp), %eax
+; I32-NEXT: je .LBB1_1
+; I32-NEXT: # BB#2:
+; I32-NEXT: leal 24(%ebp), %eax
+; I32-NEXT: jmp .LBB1_3
+; I32-NEXT: .LBB1_1:
+; I32-NEXT: leal 16(%ebp), %eax
+; I32-NEXT: .LBB1_3:
+; I32-NEXT: movq (%eax), %mm0
+; I32-NEXT: psllw %mm0, %mm0
+; I32-NEXT: movq %mm0, (%esp)
+; I32-NEXT: movl (%esp), %eax
+; I32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; I32-NEXT: movl %ebp, %esp
+; I32-NEXT: popl %ebp
+; I32-NEXT: retl
+  %cond = icmp eq i64 %arg, 0
+  %xmmx = bitcast i64 %x to x86_mmx
+  %ymmx = bitcast i64 %y to x86_mmx
+  %slct = select i1 %cond, x86_mmx %xmmx, x86_mmx %ymmx
+  %psll = tail call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %slct, x86_mmx %slct)
+  %retc = bitcast x86_mmx %psll to i64
+  ret i64 %retc
+}
+
+declare x86_mmx @llvm.x86.mmx.psll.w(x86_mmx, x86_mmx)
+
diff --git a/test/CodeGen/X86/vector-shuffle-128-v16.ll b/test/CodeGen/X86/vector-shuffle-128-v16.ll
index abba0ff87ace..9f1ed021992d 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v16.ll
@@ -1643,7 +1643,7 @@ define <16 x i8> @insert_dup_elt1_mem_v16i8_sext_i8(i8* %ptr) {
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: movsbl (%rdi), %eax
 ; AVX512VL-NEXT: shrl $8, %eax
-; AVX512VL-NEXT: vpbroadcastb %al, %xmm0
+; AVX512VL-NEXT: vpbroadcastb %eax, %xmm0
 ; AVX512VL-NEXT: retq
  %tmp = load i8, i8* %ptr, align 1
  %tmp1 = sext i8 %tmp to i32
@@ -1696,7 +1696,7 @@ define <16 x i8> @insert_dup_elt2_mem_v16i8_sext_i8(i8* %ptr) {
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: movsbl (%rdi), %eax
 ; AVX512VL-NEXT: shrl $16, %eax
-; AVX512VL-NEXT: vpbroadcastb %al, %xmm0
+; AVX512VL-NEXT: vpbroadcastb %eax, %xmm0
 ; AVX512VL-NEXT: retq
  %tmp = load i8, i8* %ptr, align 1
  %tmp1 = sext i8 %tmp to i32
diff --git a/test/CodeGen/X86/vector-shuffle-128-v8.ll b/test/CodeGen/X86/vector-shuffle-128-v8.ll
index c03b9d1472c1..1cf8453fc6ad 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v8.ll
@@ -2274,7 +2274,7 @@ define <8 x i16> @insert_dup_mem_v8i16_sext_i16(i16* %ptr) {
 ; AVX512VL-LABEL: insert_dup_mem_v8i16_sext_i16:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vpbroadcastw %ax, %xmm0
+; AVX512VL-NEXT: vpbroadcastw %eax, %xmm0
 ; AVX512VL-NEXT: retq
  %tmp = load i16, i16* %ptr, align 2
  %tmp1 = sext i16 %tmp to i32
@@ -2390,7 +2390,7 @@ define <8 x i16> @insert_dup_elt1_mem_v8i16_sext_i16(i16* %ptr) {
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: movswl (%rdi), %eax
 ; AVX512VL-NEXT: shrl $16, %eax
-; AVX512VL-NEXT: vpbroadcastw %ax, %xmm0
+; AVX512VL-NEXT: vpbroadcastw %eax, %xmm0
 ; AVX512VL-NEXT: retq
  %tmp = load i16, i16* %ptr, align 2
  %tmp1 = sext i16 %tmp to i32
@@ -2443,7 +2443,7 @@ define <8 x i16> @insert_dup_elt3_mem_v8i16_sext_i16(i16* %ptr) {
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: movswl (%rdi), %eax
 ; AVX512VL-NEXT: shrl $16, %eax
-; AVX512VL-NEXT: vpbroadcastw %ax, %xmm0
+; AVX512VL-NEXT: vpbroadcastw %eax, %xmm0
 ; AVX512VL-NEXT: retq
  %tmp = load i16, i16* %ptr, align 2
  %tmp1 = sext i16 %tmp to i32
diff --git a/test/CodeGen/X86/vector-shuffle-256-v16.ll b/test/CodeGen/X86/vector-shuffle-256-v16.ll
index 6f5d916f2294..ba7c0894b932 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -4069,7 +4069,7 @@ define <16 x i16> @insert_dup_mem_v16i16_sext_i16(i16* %ptr) {
 ; AVX512VL-LABEL: insert_dup_mem_v16i16_sext_i16:
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: movswl (%rdi), %eax
-; AVX512VL-NEXT: vpbroadcastw %ax, %ymm0
+; AVX512VL-NEXT: vpbroadcastw %eax, %ymm0
 ; AVX512VL-NEXT: retq
  %tmp = load i16, i16* %ptr, align 2
  %tmp1 = sext i16 %tmp to i32
diff --git a/test/CodeGen/X86/vector-shuffle-256-v32.ll b/test/CodeGen/X86/vector-shuffle-256-v32.ll
index 05a797cb6f8e..d51b69415b93 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -2431,7 +2431,7 @@ define <32 x i8> @insert_dup_elt1_mem_v32i8_sext_i8(i8* %ptr) {
 ; AVX512VL: # BB#0:
 ; AVX512VL-NEXT: movsbl (%rdi), %eax
 ; AVX512VL-NEXT: shrl $8, %eax
-; AVX512VL-NEXT: vpbroadcastb %al, %ymm0
+; AVX512VL-NEXT: vpbroadcastb %eax, %ymm0
 ; AVX512VL-NEXT: retq
  %tmp = load i8, i8* %ptr, align 1
  %tmp1 = sext i8 %tmp to i32
diff --git a/test/CodeGen/X86/vector-shuffle-512-v32.ll b/test/CodeGen/X86/vector-shuffle-512-v32.ll
index 7a5c992bb829..b8fc27ba5515 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v32.ll
@@ -228,7 +228,7 @@ define <32 x i16> @insert_dup_mem_v32i16_i32(i32* %ptr) {
 ; SKX-LABEL: insert_dup_mem_v32i16_i32:
 ; SKX: ## BB#0:
 ; SKX-NEXT: movl (%rdi), %eax
-; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: vpbroadcastw %eax, %zmm0
 ; SKX-NEXT: retq
  %tmp = load i32, i32* %ptr, align 4
  %tmp1 = insertelement <4 x i32> zeroinitializer, i32 %tmp, i32 0
@@ -249,7 +249,7 @@ define <32 x i16> @insert_dup_mem_v32i16_sext_i16(i16* %ptr) {
 ; SKX-LABEL: insert_dup_mem_v32i16_sext_i16:
 ; SKX: ## BB#0:
 ; SKX-NEXT: movswl (%rdi), %eax
-; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: vpbroadcastw %eax, %zmm0
 ; SKX-NEXT: retq
  %tmp = load i16, i16* %ptr, align 2
  %tmp1 = sext i16 %tmp to i32
@@ -269,7 +269,7 @@ define <32 x i16> @insert_dup_elt1_mem_v32i16_i32(i32* %ptr) #0 {
 ; SKX-LABEL: insert_dup_elt1_mem_v32i16_i32:
 ; SKX: ## BB#0:
 ; SKX-NEXT: movzwl 2(%rdi), %eax
-; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: vpbroadcastw %eax, %zmm0
 ; SKX-NEXT: retq
  %tmp = load i32, i32* %ptr, align 4
  %tmp1 = insertelement <4 x i32> zeroinitializer, i32 %tmp, i32 0
@@ -288,7 +288,7 @@ define <32 x i16> @insert_dup_elt3_mem_v32i16_i32(i32* %ptr) #0 {
 ; SKX-LABEL: insert_dup_elt3_mem_v32i16_i32:
 ; SKX: ## BB#0:
 ; SKX-NEXT: movzwl 2(%rdi), %eax
-; SKX-NEXT: vpbroadcastw %ax, %zmm0
+; SKX-NEXT: vpbroadcastw %eax, %zmm0
 ; SKX-NEXT: retq
  %tmp = load i32, i32* %ptr, align 4
  %tmp1 = insertelement <4 x i32> zeroinitializer, i32 %tmp, i32 1
diff --git a/test/CodeGen/X86/vector-shuffle-512-v64.ll b/test/CodeGen/X86/vector-shuffle-512-v64.ll
index f4650ec741a7..9dca3191e06b 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v64.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v64.ll
@@ -332,7 +332,7 @@ define <64 x i8> @insert_dup_elt1_mem_v64i8_sext_i8(i8* %ptr) {
 ; AVX512BW: # BB#0:
 ; AVX512BW-NEXT: movsbl (%rdi), %eax
 ; AVX512BW-NEXT: shrl $8, %eax
-; AVX512BW-NEXT: vpbroadcastb %al, %zmm0
+; AVX512BW-NEXT: vpbroadcastb %eax, %zmm0
 ; AVX512BW-NEXT: retq
 ;
 ; AVX512DQ-LABEL: insert_dup_elt1_mem_v64i8_sext_i8:
@@ -348,7 +348,7 @@ define <64 x i8> @insert_dup_elt1_mem_v64i8_sext_i8(i8* %ptr) {
 ; AVX512VBMI: # BB#0:
 ; AVX512VBMI-NEXT: movsbl (%rdi), %eax
 ; AVX512VBMI-NEXT: shrl $8, %eax
-; AVX512VBMI-NEXT: vpbroadcastb %al, %zmm0
+; AVX512VBMI-NEXT: vpbroadcastb %eax, %zmm0
 ; AVX512VBMI-NEXT: retq
  %tmp = load i8, i8* %ptr, align 1
  %tmp1 = sext i8 %tmp to i32
diff --git a/test/Instrumentation/DataFlowSanitizer/Inputs/shadow-args-abilist.txt b/test/Instrumentation/DataFlowSanitizer/Inputs/shadow-args-abilist.txt
new file mode 100644
index 000000000000..723cbc9086da
--- /dev/null
+++ b/test/Instrumentation/DataFlowSanitizer/Inputs/shadow-args-abilist.txt
@@ -0,0 +1,8 @@
+fun:dfsan_get_label=uninstrumented
+fun:dfsan_get_label=custom
+
+fun:k2=uninstrumented
+fun:k2=custom
+
+fun:k4=uninstrumented
+fun:k4=custom
diff --git a/test/Instrumentation/DataFlowSanitizer/abilist.ll b/test/Instrumentation/DataFlowSanitizer/abilist.ll
index 8b30875a03fa..e33237ffe19d 100644
--- a/test/Instrumentation/DataFlowSanitizer/abilist.ll
+++ b/test/Instrumentation/DataFlowSanitizer/abilist.ll
@@ -47,13 +47,13 @@ define void @f(i32 %x) {
   ; CHECK: %[[LABELVA1:.*]] = alloca [2 x i16]
   ; CHECK: %[[LABELRETURN:.*]] = alloca i16
 
-  ; CHECK: call void @__dfsw_custom1(i32 1, i32 2, i16 0, i16 0)
+  ; CHECK: call void @__dfsw_custom1(i32 1, i32 2, i16 zeroext 0, i16 zeroext 0)
   call void @custom1(i32 1, i32 2)
 
-  ; CHECK: call i32 @__dfsw_custom2(i32 1, i32 2, i16 0, i16 0, i16* %[[LABELRETURN]])
+  ; CHECK: call i32 @__dfsw_custom2(i32 1, i32 2, i16 zeroext 0, i16 zeroext 0, i16* %[[LABELRETURN]])
   call i32 @custom2(i32 1, i32 2)
 
-  ; CHECK: call void @__dfsw_customcb({{.*}} @"dfst0$customcb", i8* bitcast ({{.*}} @"dfs$cb" to i8*), i16 0)
+  ; CHECK: call void @__dfsw_customcb({{.*}} @"dfst0$customcb", i8* bitcast ({{.*}} @"dfs$cb" to i8*), i16 zeroext 0)
   call void @customcb(i32 (i32)* @cb)
 
   ; CHECK: %[[LABELVA1_0:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA1]], i32 0, i32 0
@@ -61,12 +61,12 @@ define void @f(i32 %x) {
   ; CHECK: %[[LABELVA1_1:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA1]], i32 0, i32 1
   ; CHECK: store i16 %{{.*}}, i16* %[[LABELVA1_1]]
   ; CHECK: %[[LABELVA1_0A:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA1]], i32 0, i32 0
-  ; CHECK: call void (i32, i16, i16*, ...) @__dfsw_custom3(i32 1, i16 0, i16* %[[LABELVA1_0A]], i32 2, i32 %{{.*}})
+  ; CHECK: call void (i32, i16, i16*, ...) @__dfsw_custom3(i32 1, i16 zeroext 0, i16* %[[LABELVA1_0A]], i32 2, i32 %{{.*}})
   call void (i32, ...) @custom3(i32 1, i32 2, i32 %x)
 
   ; CHECK: %[[LABELVA2_0:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA2]], i32 0, i32 0
   ; CHECK: %[[LABELVA2_0A:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA2]], i32 0, i32 0
-  ; CHECK: call i32 (i32, i16, i16*, i16*, ...) @__dfsw_custom4(i32 1, i16 0, i16* %[[LABELVA2_0A]], i16* %[[LABELRETURN]], i32 2, i32 3)
+  ; CHECK: call i32 (i32, i16, i16*, i16*, ...) @__dfsw_custom4(i32 1, i16 zeroext 0, i16* %[[LABELVA2_0A]], i16* %[[LABELRETURN]], i32 2, i32 3)
   call i32 (i32, ...) @custom4(i32 1, i32 2, i32 3)
 
   ret void
diff --git a/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll b/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
new file mode 100644
index 000000000000..0ffbf1970e7f
--- /dev/null
+++ b/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
@@ -0,0 +1,54 @@
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu < %s -dfsan -S --dfsan-abilist=%S/Inputs/shadow-args-abilist.txt | FileCheck %s
+
+; REQUIRES: x86-registered-target
+
+; Test that the custom abi marks shadow parameters as zero extended.
+
+define i32 @m() {
+entry:
+  %call = call zeroext i16 @dfsan_get_label(i64 signext 56)
+  %conv = zext i16 %call to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: @"dfs$m"
+; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i16 zeroext 0, i16* %{{.*}})
+
+define i32 @k() {
+entry:
+  %call = call zeroext i16 @k2(i64 signext 56, i64 signext 67)
+  %conv = zext i16 %call to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: @"dfs$k"
+; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i16 zeroext {{.*}}, i16 zeroext {{.*}}, i16* %{{.*}})
+
+define i32 @k3() {
+entry:
+  %call = call zeroext i16 @k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89)
+  %conv = zext i16 %call to i32
+  ret i32 %conv
+}
+
+; CHECK-LABEL: @"dfs$k3"
+; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i16 zeroext {{.*}}, i16 zeroext {{.*}}, i16 zeroext {{.*}}, i16 zeroext {{.*}}, i16* %{{.*}})
+
+declare zeroext i16 @dfsan_get_label(i64 signext)
+
+; CHECK-LABEL: @"dfsw$dfsan_get_label"
+; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i16 zeroext %1, i16* %{{.*}})
+
+declare zeroext i16 @k2(i64 signext, i64 signext)
+; CHECK-LABEL: @"dfsw$k2"
+; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i16 zeroext %{{.*}}, i16 zeroext %{{.*}}, i16* %{{.*}})
+
+declare zeroext i16 @k4(i64 signext, i64 signext, i64 signext, i64 signext)
+
+; CHECK-LABEL: @"dfsw$k4"
+; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i16 zeroext %{{.*}}, i16 zeroext %{{.*}}, i16 zeroext %{{.*}}, i16 zeroext %{{.*}}, i16* %{{.*}})
+
+
+; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i16, i16*)
+; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i16, i16, i16*)
+; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i16, i16, i16, i16, i16*)
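An aside on the DataFlowSanitizer pattern the three files above exercise (this paraphrases what the CHECK lines themselves encode, with illustrative names flagged below): a function listed in the abilist as both `uninstrumented` and `custom` keeps its original body, and every call to it is rewritten to a `__dfsw_`-prefixed wrapper that receives one 16-bit shadow label per original argument — now marked `zeroext`, which is the point of this commit — plus a trailing `i16*` out-parameter for the return value's label when there is a return value. Schematically, for the two-argument `k2`:

```llvm
; Hypothetical before/after sketch mirroring the CHECK lines above.
; Original call in user code:
;   %r = call zeroext i16 @k2(i64 signext 56, i64 signext 67)
; After -dfsan with fun:k2=uninstrumented and fun:k2=custom, it becomes:
;   %r = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67,
;                                    i16 zeroext %label.a, i16 zeroext %label.b,
;                                    i16* %ret_label_slot)
```

(`%label.a`, `%label.b`, and `%ret_label_slot` are illustrative names; the pass materializes the actual values from its shadow state, as the alloca-and-getelementptr CHECK lines in abilist.ll show.)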
diff --git a/test/Transforms/BDCE/invalidate-assumptions.ll b/test/Transforms/BDCE/invalidate-assumptions.ll
new file mode 100644
index 000000000000..d165d74be86d
--- /dev/null
+++ b/test/Transforms/BDCE/invalidate-assumptions.ll
@@ -0,0 +1,100 @@
+; RUN: opt -bdce %s -S | FileCheck %s
+
+; The 'nuw' on the subtract allows us to deduce that %setbit is not demanded.
+; But if we change that value to '0', then the 'nuw' is no longer valid. If we don't
+; remove the 'nuw', another pass (-instcombine) may make a transform based on an
+; that incorrect assumption and we can miscompile:
+; https://bugs.llvm.org/show_bug.cgi?id=33695
+
+define i1 @PR33695(i1 %b, i8 %x) {
+; CHECK-LABEL: @PR33695(
+; CHECK-NEXT: [[SETBIT:%.*]] = or i8 %x, 64
+; CHECK-NEXT: [[LITTLE_NUMBER:%.*]] = zext i1 %b to i8
+; CHECK-NEXT: [[BIG_NUMBER:%.*]] = shl i8 0, 1
+; CHECK-NEXT: [[SUB:%.*]] = sub i8 [[BIG_NUMBER]], [[LITTLE_NUMBER]]
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[SUB]] to i1
+; CHECK-NEXT: ret i1 [[TRUNC]]
+;
+  %setbit = or i8 %x, 64
+  %little_number = zext i1 %b to i8
+  %big_number = shl i8 %setbit, 1
+  %sub = sub nuw i8 %big_number, %little_number
+  %trunc = trunc i8 %sub to i1
+  ret i1 %trunc
+}
+
+; Similar to above, but now with more no-wrap.
+; https://bugs.llvm.org/show_bug.cgi?id=34037
+
+define i64 @PR34037(i64 %m, i32 %r, i64 %j, i1 %b, i32 %k, i64 %p) {
+; CHECK-LABEL: @PR34037(
+; CHECK-NEXT: [[CONV:%.*]] = zext i32 %r to i64
+; CHECK-NEXT: [[AND:%.*]] = and i64 %m, 0
+; CHECK-NEXT: [[NEG:%.*]] = xor i64 0, 34359738367
+; CHECK-NEXT: [[OR:%.*]] = or i64 %j, 0
+; CHECK-NEXT: [[SHL:%.*]] = shl i64 0, 29
+; CHECK-NEXT: [[CONV1:%.*]] = select i1 %b, i64 7, i64 0
+; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[SHL]], [[CONV1]]
+; CHECK-NEXT: [[CONV2:%.*]] = zext i32 %k to i64
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[SUB]], [[CONV2]]
+; CHECK-NEXT: [[CONV4:%.*]] = and i64 %p, 65535
+; CHECK-NEXT: [[AND5:%.*]] = and i64 [[MUL]], [[CONV4]]
+; CHECK-NEXT: ret i64 [[AND5]]
+;
+  %conv = zext i32 %r to i64
+  %and = and i64 %m, %conv
+  %neg = xor i64 %and, 34359738367
+  %or = or i64 %j, %neg
+  %shl = shl i64 %or, 29
+  %conv1 = select i1 %b, i64 7, i64 0
+  %sub = sub nuw nsw i64 %shl, %conv1
+  %conv2 = zext i32 %k to i64
+  %mul = mul nsw i64 %sub, %conv2
+  %conv4 = and i64 %p, 65535
+  %and5 = and i64 %mul, %conv4
+  ret i64 %and5
+}
+
+; This is a manufactured example based on the 1st test to prove that the
+; assumption-killing algorithm stops at the call. Ie, it does not remove
+; nsw/nuw from the 'add' because a call demands all bits of its argument.
+
+declare i1 @foo(i1)
+
+define i1 @poison_on_call_user_is_ok(i1 %b, i8 %x) {
+; CHECK-LABEL: @poison_on_call_user_is_ok(
+; CHECK-NEXT: [[SETBIT:%.*]] = or i8 %x, 64
+; CHECK-NEXT: [[LITTLE_NUMBER:%.*]] = zext i1 %b to i8
+; CHECK-NEXT: [[BIG_NUMBER:%.*]] = shl i8 0, 1
+; CHECK-NEXT: [[SUB:%.*]] = sub i8 [[BIG_NUMBER]], [[LITTLE_NUMBER]]
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[SUB]] to i1
+; CHECK-NEXT: [[CALL_RESULT:%.*]] = call i1 @foo(i1 [[TRUNC]])
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i1 [[CALL_RESULT]], true
+; CHECK-NEXT: [[MUL:%.*]] = mul i1 [[TRUNC]], [[ADD]]
+; CHECK-NEXT: ret i1 [[MUL]]
+;
+  %setbit = or i8 %x, 64
+  %little_number = zext i1 %b to i8
+  %big_number = shl i8 %setbit, 1
+  %sub = sub nuw i8 %big_number, %little_number
+  %trunc = trunc i8 %sub to i1
+  %call_result = call i1 @foo(i1 %trunc)
+  %add = add nsw nuw i1 %call_result, 1
+  %mul = mul i1 %trunc, %add
+  ret i1 %mul
+}
+
+
+; We were asserting that all users of a trivialized integer-type instruction were
+; also integer-typed, but that's too strong. The alloca has a pointer-type result.
+
+define void @PR34179(i32* %a) {
+; CHECK-LABEL: @PR34179(
+; CHECK-NEXT: [[T0:%.*]] = load volatile i32, i32* %a
+; CHECK-NEXT: ret void
+;
+  %t0 = load volatile i32, i32* %a
+  %vla = alloca i32, i32 %t0
+  ret void
+}
+
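An aside filling in the demanded-bits step the PR33695 comment above leaves implicit (reasoning reconstructed, not text from the commit): the only use of `%sub` is a `trunc` to `i1`, which demands bit 0 alone; the low bit of a subtraction depends only on the low bits of its operands, and bit 0 of `shl i8 %setbit, 1` is 0 regardless of `%setbit`, so BDCE may rewrite the `shl` operand to 0. The original `sub nuw` could never wrap, since `%setbit` always has bit 6 set and `%big_number` is therefore at least 128 while `%little_number` is at most 1; after the rewrite, `sub nuw i8 0, 1` does wrap, so keeping `nuw` would make the result poison for later passes — hence the flag must be dropped, which the CHECK lines confirm.

```latex
% The two bit identities behind "only bit 0 is demanded" (x, y arbitrary i8 values):
\[
(x \ll 1) \bmod 2 = 0,
\qquad
(x - y) \bmod 2 = \bigl((x \bmod 2) - (y \bmod 2)\bigr) \bmod 2 .
\]
```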
diff --git a/test/Transforms/IndVarSimplify/exit_value_test2.ll b/test/Transforms/IndVarSimplify/exit_value_test2.ll
index ee641667506c..7b6e91a742b2 100644
--- a/test/Transforms/IndVarSimplify/exit_value_test2.ll
+++ b/test/Transforms/IndVarSimplify/exit_value_test2.ll
@@ -3,15 +3,14 @@
 ; Check IndVarSimplify should not replace exit value because or else
 ; udiv will be introduced by expand and the cost will be high.
-;
-; CHECK-LABEL: @_Z3fooPKcjj(
-; CHECK-NOT: udiv
 
 declare void @_Z3mixRjj(i32* dereferenceable(4), i32)
 
 declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
 declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
 
 define i32 @_Z3fooPKcjj(i8* nocapture readonly %s, i32 %len, i32 %c) {
+; CHECK-LABEL: @_Z3fooPKcjj(
+; CHECK-NOT: udiv
 entry:
   %a = alloca i32, align 4
   %tmp = bitcast i32* %a to i8*
@@ -50,3 +49,26 @@ while.end: ; preds = %while.cond.while.en
   call void @llvm.lifetime.end.p0i8(i64 4, i8* %tmp)
   ret i32 %tmp4
 }
+
+define i32 @zero_backedge_count_test(i32 %unknown_init, i32* %unknown_mem) {
+; CHECK-LABEL: @zero_backedge_count_test(
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry], [ %iv.inc, %loop ]
+  %unknown_phi = phi i32 [ %unknown_init, %entry ], [ %unknown_next, %loop ]
+  %iv.inc = add i32 %iv, 1
+  %be_taken = icmp ne i32 %iv.inc, 1
+  %unknown_next = load volatile i32, i32* %unknown_mem
+  br i1 %be_taken, label %loop, label %leave
+
+leave:
+; We can fold %unknown_phi even though the backedge value for it is completely
+; unknown, since we can prove that the loop's backedge taken count is 0.
+
+; CHECK: leave:
+; CHECK: ret i32 %unknown_init
+  %exit_val = phi i32 [ %unknown_phi, %loop ]
+  ret i32 %exit_val
+}
diff --git a/test/Transforms/SimplifyCFG/pr34131.ll b/test/Transforms/SimplifyCFG/pr34131.ll
new file mode 100644
index 000000000000..b64b6876e04e
--- /dev/null
+++ b/test/Transforms/SimplifyCFG/pr34131.ll
@@ -0,0 +1,74 @@
+; RUN: opt -simplifycfg -S < %s | FileCheck %s
+
+; Just checking for lack of crash here, but we should be able to check the IR?
+; Earlier version using auto-generated checks from utils/update_test_checks.py
+; had bot problems though...
+
+define void @patatino() {
+
+; CHECK-LABEL: @patatino
+
+  br label %bb1
+bb1: ; preds = %bb36, %0
+  br label %bb2
+bb2: ; preds = %bb3, %bb1
+  br i1 undef, label %bb4, label %bb3
+bb3: ; preds = %bb4, %bb2
+  br i1 undef, label %bb2, label %bb5
+bb4: ; preds = %bb2
+  switch i32 undef, label %bb3 [
+  ]
+bb5: ; preds = %bb3
+  br label %bb6
+bb6: ; preds = %bb5
+  br i1 undef, label %bb7, label %bb9
+bb7: ; preds = %bb6
+  %tmp = or i64 undef, 1
+  %tmp8 = icmp ult i64 %tmp, 0
+  br i1 %tmp8, label %bb12, label %bb9
+bb9: ; preds = %bb35, %bb34, %bb33, %bb32, %bb31, %bb30, %bb27, %bb24, %bb21, %bb18, %bb16, %bb14, %bb12, %bb7, %bb6
+  br label %bb11
+bb10: ; preds = %bb36
+  br label %bb11
+bb11: ; preds = %bb10, %bb9
+  ret void
+bb12: ; preds = %bb7
+  %tmp13 = icmp ult i64 0, 0
+  br i1 %tmp13, label %bb14, label %bb9
+bb14: ; preds = %bb12
+  %tmp15 = icmp ult i64 undef, 0
+  br i1 %tmp15, label %bb16, label %bb9
+bb16: ; preds = %bb14
+  %tmp17 = icmp ult i64 undef, 0
+  br i1 %tmp17, label %bb18, label %bb9
+bb18: ; preds = %bb16
+  %tmp19 = or i64 undef, 5
+  %tmp20 = icmp ult i64 %tmp19, 0
+  br i1 %tmp20, label %bb21, label %bb9
+bb21: ; preds = %bb18
+  %tmp22 = or i64 undef, 6
+  %tmp23 = icmp ult i64 %tmp22, 0
+  br i1 %tmp23, label %bb24, label %bb9
+bb24: ; preds = %bb21
+  %tmp25 = or i64 undef, 7
+  %tmp26 = icmp ult i64 %tmp25, 0
+  br i1 %tmp26, label %bb27, label %bb9
+bb27: ; preds = %bb24
+  %tmp28 = or i64 undef, 8
+  %tmp29 = icmp ult i64 %tmp28, 0
+  br i1 %tmp29, label %bb30, label %bb9
+bb30: ; preds = %bb27
+  br i1 undef, label %bb31, label %bb9
+bb31: ; preds = %bb30
+  br i1 undef, label %bb32, label %bb9
+bb32: ; preds = %bb31
+  br i1 undef, label %bb33, label %bb9
+bb33: ; preds = %bb32
+  br i1 undef, label %bb34, label %bb9
+bb34: ; preds = %bb33
+  br i1 undef, label %bb35, label %bb9
+bb35: ; preds = %bb34
+  br i1 undef, label %bb36, label %bb9
+bb36: ; preds = %bb35
+  br i1 undef, label %bb1, label %bb10
+}