summaryrefslogtreecommitdiff
path: root/test/CodeGen
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen')
-rw-r--r--test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll19
-rw-r--r--test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll65
-rw-r--r--test/CodeGen/AArch64/GlobalISel/call-translator.ll2
-rw-r--r--test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll2
-rw-r--r--test/CodeGen/AArch64/GlobalISel/legalize-combines.mir8
-rw-r--r--test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll7
-rw-r--r--test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir85
-rw-r--r--test/CodeGen/AArch64/GlobalISel/legalize-undef.mir15
-rw-r--r--test/CodeGen/AArch64/GlobalISel/select-trunc.mir4
-rw-r--r--test/CodeGen/AArch64/arm64-ccmp.ll6
-rw-r--r--test/CodeGen/AArch64/arm64-spill-remarks.ll27
-rw-r--r--test/CodeGen/AArch64/ccmp-successor-probs.mir46
-rw-r--r--test/CodeGen/AArch64/cond-br-tuning.ll8
-rw-r--r--test/CodeGen/AMDGPU/alignbit-pat.ll100
-rw-r--r--test/CodeGen/AMDGPU/bug-vopc-commute.ll6
-rw-r--r--test/CodeGen/AMDGPU/cgp-bitfield-extract.ll9
-rw-r--r--test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll10
-rw-r--r--test/CodeGen/AMDGPU/combine-and-sext-bool.ll27
-rw-r--r--test/CodeGen/AMDGPU/combine-cond-add-sub.ll20
-rw-r--r--test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll37
-rw-r--r--test/CodeGen/AMDGPU/llvm.SI.load.dword.ll22
-rw-r--r--test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll20
-rw-r--r--test/CodeGen/AMDGPU/misched-killflags.mir45
-rw-r--r--test/CodeGen/AMDGPU/mubuf.ll22
-rw-r--r--test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir69
-rw-r--r--test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir155
-rw-r--r--test/CodeGen/AMDGPU/ret_jump.ll7
-rw-r--r--test/CodeGen/AMDGPU/scheduler-subrange-crash.ll12
-rw-r--r--test/CodeGen/AMDGPU/sdwa-peephole-instr.mir446
-rw-r--r--test/CodeGen/AMDGPU/select-vectors.ll2
-rw-r--r--test/CodeGen/AMDGPU/setcc-sext.ll292
-rw-r--r--test/CodeGen/AMDGPU/sgpr-copy.ll94
-rw-r--r--test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll22
-rw-r--r--test/CodeGen/AMDGPU/shift-i64-opts.ll74
-rw-r--r--test/CodeGen/AMDGPU/si-lod-bias.ll17
-rw-r--r--test/CodeGen/AMDGPU/si-sgpr-spill.ll398
-rw-r--r--test/CodeGen/AMDGPU/si-spill-cf.ll136
-rw-r--r--test/CodeGen/AMDGPU/smrd.ll48
-rw-r--r--test/CodeGen/AMDGPU/spill-to-smem-m0.ll22
-rw-r--r--test/CodeGen/AMDGPU/split-smrd.ll4
-rw-r--r--test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll20
-rw-r--r--test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll2
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir73
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll10
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-isel.ll20
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalizer.mir55
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir31
-rw-r--r--test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll2
-rw-r--r--test/CodeGen/ARM/Windows/no-arm-mode.ll10
-rw-r--r--test/CodeGen/ARM/Windows/tls.ll14
-rw-r--r--test/CodeGen/ARM/alloca.ll4
-rw-r--r--test/CodeGen/ARM/arg-copy-elide.ll4
-rw-r--r--test/CodeGen/ARM/arm-abi-attr.ll2
-rw-r--r--test/CodeGen/ARM/arm-and-tst-peephole.ll2
-rw-r--r--test/CodeGen/ARM/arm-position-independence-jump-table.ll2
-rw-r--r--test/CodeGen/ARM/arm-shrink-wrapping-linux.ll10
-rw-r--r--test/CodeGen/ARM/atomic-cmpxchg.ll4
-rw-r--r--test/CodeGen/ARM/bool-ext-inc.ll28
-rw-r--r--test/CodeGen/ARM/cmpxchg-O0-be.ll26
-rw-r--r--test/CodeGen/ARM/cmpxchg-weak.ll4
-rw-r--r--test/CodeGen/ARM/code-placement.ll5
-rw-r--r--test/CodeGen/ARM/constantfp.ll12
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-basic.ll6
-rw-r--r--test/CodeGen/ARM/cortexr52-misched-basic.ll4
-rw-r--r--test/CodeGen/ARM/ctor_order.ll2
-rw-r--r--test/CodeGen/ARM/ctors_dtors.ll2
-rw-r--r--test/CodeGen/ARM/cttz.ll4
-rw-r--r--test/CodeGen/ARM/cttz_vector.ll64
-rw-r--r--test/CodeGen/ARM/cxx-tlscc.ll2
-rw-r--r--test/CodeGen/ARM/execute-only-big-stack-frame.ll6
-rw-r--r--test/CodeGen/ARM/execute-only-section.ll6
-rw-r--r--test/CodeGen/ARM/execute-only.ll6
-rw-r--r--test/CodeGen/ARM/fp16-promote.ll29
-rw-r--r--test/CodeGen/ARM/fp16-v3.ll4
-rw-r--r--test/CodeGen/ARM/ifcvt7.ll2
-rw-r--r--test/CodeGen/ARM/illegal-bitfield-loadstore.ll6
-rw-r--r--test/CodeGen/ARM/indirectbr.ll4
-rw-r--r--test/CodeGen/ARM/jump-table-islands.ll2
-rw-r--r--test/CodeGen/ARM/jump-table-tbh.ll6
-rw-r--r--test/CodeGen/ARM/ldm-stm-i256.ll20
-rw-r--r--test/CodeGen/ARM/legalize-unaligned-load.ll2
-rw-r--r--test/CodeGen/ARM/long-setcc.ll2
-rw-r--r--test/CodeGen/ARM/long_shift.ll16
-rw-r--r--test/CodeGen/ARM/misched-fusion-aes.ll6
-rw-r--r--test/CodeGen/ARM/select_const.ll8
-rw-r--r--test/CodeGen/ARM/shift-i64.ll2
-rw-r--r--test/CodeGen/ARM/ssp-data-layout.ll2
-rw-r--r--test/CodeGen/ARM/str_pre-2.ll2
-rw-r--r--test/CodeGen/ARM/swifterror.ll52
-rw-r--r--test/CodeGen/ARM/thumb2-it-block.ll4
-rw-r--r--test/CodeGen/ARM/vcgt.ll4
-rw-r--r--test/CodeGen/ARM/vector-DAGCombine.ll10
-rw-r--r--test/CodeGen/ARM/vext.ll58
-rw-r--r--test/CodeGen/ARM/vfp.ll4
-rw-r--r--test/CodeGen/ARM/vld1.ll2
-rw-r--r--test/CodeGen/ARM/vld2.ll16
-rw-r--r--test/CodeGen/ARM/vld3.ll16
-rw-r--r--test/CodeGen/ARM/vld4.ll24
-rw-r--r--test/CodeGen/ARM/vlddup.ll54
-rw-r--r--test/CodeGen/ARM/vldlane.ll2
-rw-r--r--test/CodeGen/ARM/vpadd.ll22
-rw-r--r--test/CodeGen/ARM/vst1.ll2
-rw-r--r--test/CodeGen/ARM/vst4.ll8
-rw-r--r--test/CodeGen/ARM/vstlane.ll6
-rw-r--r--test/CodeGen/ARM/vuzp.ll269
-rw-r--r--test/CodeGen/BPF/remove_truncate_1.ll87
-rw-r--r--test/CodeGen/BPF/remove_truncate_2.ll65
-rw-r--r--test/CodeGen/Hexagon/addrmode-keepdeadphis.mir30
-rw-r--r--test/CodeGen/Hexagon/expand-condsets-undefvni.ll49
-rw-r--r--test/CodeGen/Hexagon/expand-vselect-kill.ll53
-rw-r--r--test/CodeGen/Hexagon/fpelim-basic.ll91
-rw-r--r--test/CodeGen/Hexagon/frame.ll23
-rw-r--r--test/CodeGen/Hexagon/jt-in-text.ll57
-rw-r--r--test/CodeGen/Hexagon/newvaluejump-kill2.mir18
-rw-r--r--test/CodeGen/Hexagon/newvaluejump2.ll2
-rw-r--r--test/CodeGen/Hexagon/regalloc-liveout-undef.mir35
-rw-r--r--test/CodeGen/MIR/Generic/multiRunPass.mir3
-rw-r--r--test/CodeGen/Mips/2008-06-05-Carry.ll13
-rw-r--r--test/CodeGen/Mips/dsp-patterns.ll4
-rw-r--r--test/CodeGen/Mips/llcarry.ll11
-rw-r--r--test/CodeGen/Mips/llvm-ir/add.ll380
-rw-r--r--test/CodeGen/Mips/llvm-ir/sub.ll170
-rw-r--r--test/CodeGen/Mips/madd-msub.ll81
-rw-r--r--test/CodeGen/NVPTX/lower-aggr-copies.ll4
-rw-r--r--test/CodeGen/PowerPC/anon_aggr.ll64
-rw-r--r--test/CodeGen/PowerPC/floatPSA.ll2
-rw-r--r--test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll32
-rw-r--r--test/CodeGen/PowerPC/memcmp.ll131
-rw-r--r--test/CodeGen/PowerPC/memcmpIR.ll90
-rw-r--r--test/CodeGen/PowerPC/merge_stores_dereferenceable.ll24
-rw-r--r--test/CodeGen/PowerPC/ppc64-align-long-double.ll24
-rw-r--r--test/CodeGen/PowerPC/tls.ll2
-rw-r--r--test/CodeGen/PowerPC/tls_get_addr_fence1.mir66
-rw-r--r--test/CodeGen/PowerPC/tls_get_addr_fence2.mir65
-rw-r--r--test/CodeGen/Thumb/long-setcc.ll2
-rw-r--r--test/CodeGen/Thumb2/constant-islands-new-island.ll6
-rw-r--r--test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll154
-rw-r--r--test/CodeGen/Thumb2/thumb2-ifcvt2.ll3
-rw-r--r--test/CodeGen/WebAssembly/exception.ll22
-rw-r--r--test/CodeGen/X86/GlobalISel/and-scalar.ll43
-rw-r--r--test/CodeGen/X86/GlobalISel/fadd-scalar.ll20
-rw-r--r--test/CodeGen/X86/GlobalISel/fdiv-scalar.ll20
-rw-r--r--test/CodeGen/X86/GlobalISel/fmul-scalar.ll20
-rw-r--r--test/CodeGen/X86/GlobalISel/fsub-scalar.ll20
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir124
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir74
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir74
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir74
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir74
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir124
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir124
-rw-r--r--test/CodeGen/X86/GlobalISel/or-scalar.ll43
-rw-r--r--test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir235
-rw-r--r--test/CodeGen/X86/GlobalISel/select-add.mir80
-rw-r--r--test/CodeGen/X86/GlobalISel/select-and-scalar.mir160
-rw-r--r--test/CodeGen/X86/GlobalISel/select-constant.mir21
-rw-r--r--test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir119
-rw-r--r--test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir119
-rw-r--r--test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir119
-rw-r--r--test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir119
-rw-r--r--test/CodeGen/X86/GlobalISel/select-merge-vec256.mir52
-rw-r--r--test/CodeGen/X86/GlobalISel/select-merge-vec512.mir74
-rw-r--r--test/CodeGen/X86/GlobalISel/select-or-scalar.mir160
-rw-r--r--test/CodeGen/X86/GlobalISel/select-sub.mir77
-rw-r--r--test/CodeGen/X86/GlobalISel/select-xor-scalar.mir160
-rw-r--r--test/CodeGen/X86/GlobalISel/xor-scalar.ll43
-rw-r--r--test/CodeGen/X86/atom-call-reg-indirect.ll2
-rw-r--r--test/CodeGen/X86/atom-fixup-lea2.ll2
-rw-r--r--test/CodeGen/X86/atom-sched.ll1
-rw-r--r--test/CodeGen/X86/avx2-arith.ll106
-rw-r--r--test/CodeGen/X86/avx2-cmp.ll36
-rwxr-xr-xtest/CodeGen/X86/avx2-conversions.ll74
-rw-r--r--test/CodeGen/X86/avx2-fma-fneg-combine.ll32
-rw-r--r--test/CodeGen/X86/avx2-gather.ll28
-rw-r--r--test/CodeGen/X86/avx2-logic.ll34
-rw-r--r--test/CodeGen/X86/avx2-phaddsub.ll36
-rw-r--r--test/CodeGen/X86/avx2-shift.ll170
-rw-r--r--test/CodeGen/X86/avx2-vector-shifts.ll168
-rwxr-xr-xtest/CodeGen/X86/avx2-vperm.ll20
-rw-r--r--test/CodeGen/X86/avx512-arith.ll258
-rw-r--r--test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll404
-rw-r--r--test/CodeGen/X86/avx512vl-vec-masked-cmp.ll13485
-rw-r--r--test/CodeGen/X86/bswap-vector.ll11
-rw-r--r--test/CodeGen/X86/bswap-wide-int.ll173
-rw-r--r--test/CodeGen/X86/compress_expand.ll8
-rw-r--r--test/CodeGen/X86/cpus.ll2
-rw-r--r--test/CodeGen/X86/fp128-cast.ll16
-rw-r--r--test/CodeGen/X86/insertelement-zero.ll15
-rw-r--r--test/CodeGen/X86/lower-vec-shift.ll7
-rw-r--r--test/CodeGen/X86/lower-vec-shuffle-bug.ll11
-rw-r--r--test/CodeGen/X86/masked_memop.ll16
-rw-r--r--test/CodeGen/X86/memcmp.ll766
-rw-r--r--test/CodeGen/X86/palignr.ll241
-rw-r--r--test/CodeGen/X86/peephole-recurrence.mir232
-rw-r--r--test/CodeGen/X86/sbb.ll80
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v16.ll57
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v8.ll88
-rw-r--r--test/CodeGen/X86/vector-truncate-combine.ll35
-rw-r--r--test/CodeGen/X86/x86-interleaved-access.ll58
199 files changed, 21325 insertions, 3100 deletions
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 0298315a5510..48f500eb36b5 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -158,15 +158,30 @@ define fp128 @test_quad_dump() {
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(p0) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg2; (in function: vector_of_pointers_extractelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
+@var = global <2 x i16*> zeroinitializer
define void @vector_of_pointers_extractelement() {
- %dummy = extractelement <2 x i16*> undef, i32 0
+ br label %end
+
+block:
+ %dummy = extractelement <2 x i16*> %vec, i32 0
ret void
+
+end:
+ %vec = load <2 x i16*>, <2 x i16*>* undef
+ br label %block
}
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(<2 x p0>) = G_INSERT_VECTOR_ELT %vreg1, %vreg2, %vreg3; (in function: vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
define void @vector_of_pointers_insertelement() {
- %dummy = insertelement <2 x i16*> undef, i16* null, i32 0
+ br label %end
+
+block:
+ %dummy = insertelement <2 x i16*> %vec, i16* null, i32 0
ret void
+
+end:
+ %vec = load <2 x i16*>, <2 x i16*>* undef
+ br label %block
}
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 81b42d064810..50ad83feed85 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -577,7 +577,7 @@ define i32 @constant_int_start() {
}
; CHECK-LABEL: name: test_undef
-; CHECK: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s32) = G_IMPLICIT_DEF
; CHECK: %w0 = COPY [[UNDEF]]
define i32 @test_undef() {
ret i32 undef
@@ -807,7 +807,7 @@ define float @test_frem(float %arg1, float %arg2) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SADDO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -824,7 +824,7 @@ define void @test_sadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[ZERO:%[0-9]+]](s1) = G_CONSTANT i1 false
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_UADDE [[LHS]], [[RHS]], [[ZERO]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -840,7 +840,7 @@ define void @test_uadd_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SSUBO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -857,7 +857,7 @@ define void @test_ssub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[ZERO:%[0-9]+]](s1) = G_CONSTANT i1 false
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_USUBE [[LHS]], [[RHS]], [[ZERO]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -873,7 +873,7 @@ define void @test_usub_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %subr) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_SMULO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -889,7 +889,7 @@ define void @test_smul_overflow(i32 %lhs, i32 %rhs, { i32, i1 }* %addr) {
; CHECK: [[RHS:%[0-9]+]](s32) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
; CHECK: [[VAL:%[0-9]+]](s32), [[OVERFLOW:%[0-9]+]](s1) = G_UMULO [[LHS]], [[RHS]]
-; CHECK: [[TMP:%[0-9]+]](s64) = IMPLICIT_DEF
+; CHECK: [[TMP:%[0-9]+]](s64) = G_IMPLICIT_DEF
; CHECK: [[TMP1:%[0-9]+]](s64) = G_INSERT [[TMP]], [[VAL]](s32), 0
; CHECK: [[RES:%[0-9]+]](s64) = G_INSERT [[TMP1]], [[OVERFLOW]](s1), 32
; CHECK: G_STORE [[RES]](s64), [[ADDR]](p0)
@@ -1271,6 +1271,45 @@ define float @test_fma_intrin(float %a, float %b, float %c) {
ret float %res
}
+declare float @llvm.exp.f32(float)
+define float @test_exp_intrin(float %a) {
+; CHECK-LABEL: name: test_exp_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FEXP [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.exp.f32(float %a)
+ ret float %res
+}
+
+declare float @llvm.exp2.f32(float)
+define float @test_exp2_intrin(float %a) {
+; CHECK-LABEL: name: test_exp2_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FEXP2 [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.exp2.f32(float %a)
+ ret float %res
+}
+
+declare float @llvm.log.f32(float)
+define float @test_log_intrin(float %a) {
+; CHECK-LABEL: name: test_log_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FLOG [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.log.f32(float %a)
+ ret float %res
+}
+
+declare float @llvm.log2.f32(float)
+define float @test_log2_intrin(float %a) {
+; CHECK-LABEL: name: test_log2_intrin
+; CHECK: [[A:%[0-9]+]](s32) = COPY %s0
+; CHECK: [[RES:%[0-9]+]](s32) = G_FLOG2 [[A]]
+; CHECK: %s0 = COPY [[RES]]
+ %res = call float @llvm.log2.f32(float %a)
+ ret float %res
+}
declare void @llvm.lifetime.start.p0i8(i64, i8*)
declare void @llvm.lifetime.end.p0i8(i64, i8*)
define void @test_lifetime_intrin() {
@@ -1464,7 +1503,7 @@ define float @test_different_call_conv_target(float %x) {
define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
; CHECK-LABEL: name: test_shufflevector_s32_v2s32
; CHECK: [[ARG:%[0-9]+]](s32) = COPY %w0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
@@ -1477,7 +1516,7 @@ define <2 x i32> @test_shufflevector_s32_v2s32(i32 %arg) {
define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v2s32_s32
; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK: [[RES:%[0-9]+]](s32) = G_SHUFFLE_VECTOR [[ARG]](<2 x s32>), [[UNDEF]], [[C1]](s32)
; CHECK: %w0 = COPY [[RES]](s32)
@@ -1489,7 +1528,7 @@ define i32 @test_shufflevector_v2s32_s32(<2 x i32> %arg) {
define <2 x i32> @test_shufflevector_v2s32_v2s32(<2 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v2s32
; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32)
@@ -1502,7 +1541,7 @@ define <2 x i32> @test_shufflevector_v2s32_v2s32(<2 x i32> %arg) {
define i32 @test_shufflevector_v2s32_v3s32(<2 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
; CHECK: [[ARG:%[0-9]+]](<2 x s32>) = COPY %d0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32)
@@ -1531,7 +1570,7 @@ define <4 x i32> @test_shufflevector_v2s32_v4s32(<2 x i32> %arg1, <2 x i32> %arg
define <2 x i32> @test_shufflevector_v4s32_v2s32(<4 x i32> %arg) {
; CHECK-LABEL: name: test_shufflevector_v4s32_v2s32
; CHECK: [[ARG:%[0-9]+]](<4 x s32>) = COPY %q0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C3]](s32)
@@ -1570,7 +1609,7 @@ define <16 x i8> @test_shufflevector_v8s8_v16s8(<8 x i8> %arg1, <8 x i8> %arg2)
}
; CHECK-LABEL: test_constant_vector
-; CHECK: [[UNDEF:%[0-9]+]](s16) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s16) = G_IMPLICIT_DEF
; CHECK: [[F:%[0-9]+]](s16) = G_FCONSTANT half 0xH3C00
; CHECK: [[M:%[0-9]+]](<4 x s16>) = G_MERGE_VALUES [[UNDEF]](s16), [[UNDEF]](s16), [[UNDEF]](s16), [[F]](s16)
; CHECK: %d0 = COPY [[M]](<4 x s16>)
diff --git a/test/CodeGen/AArch64/GlobalISel/call-translator.ll b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
index 0e593fdb7b85..8fba8e09f9ff 100644
--- a/test/CodeGen/AArch64/GlobalISel/call-translator.ll
+++ b/test/CodeGen/AArch64/GlobalISel/call-translator.ll
@@ -64,7 +64,7 @@ define void @test_multiple_args(i64 %in) {
; CHECK: [[I8:%[0-9]+]](s8) = COPY %w1
; CHECK: [[ADDR:%[0-9]+]](p0) = COPY %x2
-; CHECK: [[UNDEF:%[0-9]+]](s192) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s192) = G_IMPLICIT_DEF
; CHECK: [[ARG0:%[0-9]+]](s192) = G_INSERT [[UNDEF]], [[DBL]](s64), 0
; CHECK: [[ARG1:%[0-9]+]](s192) = G_INSERT [[ARG0]], [[I64]](s64), 64
; CHECK: [[ARG2:%[0-9]+]](s192) = G_INSERT [[ARG1]], [[I8]](s8), 128
diff --git a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
index ef4445111d7b..d9fec0ec7d46 100644
--- a/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
+++ b/test/CodeGen/AArch64/GlobalISel/irtranslator-exceptions.ll
@@ -19,7 +19,7 @@ declare i32 @llvm.eh.typeid.for(i8*)
; CHECK: [[BAD]] (landing-pad):
; CHECK: EH_LABEL
-; CHECK: [[UNDEF:%[0-9]+]](s128) = IMPLICIT_DEF
+; CHECK: [[UNDEF:%[0-9]+]](s128) = G_IMPLICIT_DEF
; CHECK: [[PTR:%[0-9]+]](p0) = COPY %x0
; CHECK: [[VAL_WITH_PTR:%[0-9]+]](s128) = G_INSERT [[UNDEF]], [[PTR]](p0), 0
; CHECK: [[SEL_PTR:%[0-9]+]](p0) = COPY %x1
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir b/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
index e3e0175d39ac..fbacc28d7434 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-combines.mir
@@ -57,11 +57,11 @@ body: |
%0:_(s64) = COPY %x0
; CHECK-LABEL: name: test_combines_4
- ; CHECK: %2(<2 x s32>) = G_EXTRACT %1(s128), 0
- ; CHECK: %3(<2 x s32>) = G_ADD %2, %2
+ ; CHECK: %2(s64) = COPY %0(s64)
+ ; CHECK: %3(s64) = G_ADD %2, %2
%1:_(s128) = G_MERGE_VALUES %0, %0
- %2:_(<2 x s32>) = G_EXTRACT %1, 0
- %3:_(<2 x s32>) = G_ADD %2, %2
+ %2:_(s64) = G_EXTRACT %1, 0
+ %3:_(s64) = G_ADD %2, %2
...
---
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
index 23e7d5163e5a..42ca367e122b 100644
--- a/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-exceptions.ll
@@ -22,12 +22,11 @@ declare void @_Unwind_Resume(i8*)
; CHECK: [[SEL:%[0-9]+]](s32) = G_PTRTOINT [[SEL_PTR]]
; CHECK: [[STRUCT_SEL:%[0-9]+]](s64) = G_INSERT {{%[0-9]+}}, [[SEL]](s32), 0
-; CHECK: [[STRUCT:%[0-9]+]](s128) = G_MERGE_VALUES [[STRUCT_PTR]](s64), [[STRUCT_SEL]]
-
-; CHECK: [[PTR:%[0-9]+]](p0) = G_EXTRACT [[STRUCT]](s128), 0
+; CHECK: [[PTR:%[0-9]+]](p0) = G_INTTOPTR [[STRUCT_PTR]](s64)
; CHECK: G_STORE [[PTR]](p0), {{%[0-9]+}}(p0)
-; CHECK: [[SEL:%[0-9]+]](s32) = G_EXTRACT [[STRUCT]](s128), 64
+; CHECK: [[SEL_TMP:%[0-9]+]](s32) = G_EXTRACT [[STRUCT_SEL]](s64), 0
+; CHECK: [[SEL:%[0-9]+]](s32) = COPY [[SEL_TMP]]
; CHECK: G_STORE [[SEL]](s32), {{%[0-9]+}}(p0)
define void @bar() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir b/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir
new file mode 100644
index 000000000000..dc6b59b24a9a
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-extracts.mir
@@ -0,0 +1,85 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+---
+name: test_extracts_1
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; Low part of extraction takes the entirety of the low register, so the
+ ; value stored is forwarded directly from the first load.
+
+ ; CHECK-LABEL: name: test_extracts_1
+ ; CHECK: [[LO:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: {{%[0-9]+}}(s64) = G_LOAD
+ ; CHECK: [[VAL:%[0-9]+]](s64) = COPY [[LO]]
+ ; CHECK: G_STORE [[VAL]]
+ %0:_(s64) = COPY %x0
+ %1:_(s32) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s128) = G_LOAD %2(p0) :: (load 16)
+ %4:_(s64) = G_EXTRACT %3(s128), 0
+ G_STORE %4(s64), %2(p0) :: (store 8)
+ RET_ReallyLR
+...
+
+---
+name: test_extracts_2
+body: |
+ bb.0:
+ liveins: %w0
+
+ ; Low extraction takes the whole low register. High extraction is real.
+ ; CHECK-LABEL: name: test_extracts_2
+ ; CHECK: [[LO_TMP:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_LOAD
+ ; CHECK: [[LO:%[0-9]+]](s64) = COPY [[LO_TMP]]
+ ; CHECK: [[NEWHI_TMP:%[0-9]+]](s32) = G_EXTRACT [[HI]](s64), 0
+ ; CHECK: [[NEWHI:%[0-9]+]](s32) = COPY [[NEWHI_TMP]]
+ ; CHECK: G_STORE [[LO]]
+ ; CHECK: G_STORE [[NEWHI]]
+ %0:_(s64) = COPY %x0
+ %1:_(s32) = COPY %w1
+ %2:_(p0) = COPY %x2
+ %3:_(s128) = G_LOAD %2(p0) :: (load 16)
+ %4:_(s64) = G_EXTRACT %3(s128), 0
+ %5:_(s32) = G_EXTRACT %3(s128), 64
+ G_STORE %4(s64), %2(p0) :: (store 8)
+ G_STORE %5(s32), %2(p0) :: (store 4)
+ RET_ReallyLR
+...
+
+---
+name: test_extracts_3
+body: |
+ bb.0:
+ liveins: %x0, %x1, %x2
+
+
+ ; CHECK-LABEL: name: test_extracts_3
+ ; CHECK: [[LO:%[0-9]+]](s32) = G_EXTRACT %0(s64), 32
+ ; CHECK: [[HI:%[0-9]+]](s32) = G_EXTRACT %1(s64), 0
+ ; CHECK: %3(s64) = G_MERGE_VALUES [[LO]](s32), [[HI]](s32)
+ %0:_(s64) = COPY %x0
+ %1:_(s64) = COPY %x1
+ %2:_(s128) = G_MERGE_VALUES %0, %1
+ %3:_(s64) = G_EXTRACT %2, 32
+ RET_ReallyLR
+...
+
+---
+name: test_extracts_4
+body: |
+ bb.0:
+ liveins: %x0, %x1, %x2
+
+
+ ; CHECK-LABEL: name: test_extracts_4
+ ; CHECK: [[LO_TMP:%[0-9]+]](s32) = G_EXTRACT %0(s64), 32
+ ; CHECK: %3(s32) = COPY [[LO_TMP]]
+ %0:_(s64) = COPY %x0
+ %1:_(s64) = COPY %x1
+ %2:_(s128) = G_MERGE_VALUES %0, %1
+ %3:_(s32) = G_EXTRACT %2, 32
+ RET_ReallyLR
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir b/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
new file mode 100644
index 000000000000..e7cf59b3394e
--- /dev/null
+++ b/test/CodeGen/AArch64/GlobalISel/legalize-undef.mir
@@ -0,0 +1,15 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -O0 -run-pass=legalizer -global-isel %s -o - | FileCheck %s
+
+---
+name: test_implicit_def
+registers:
+body: |
+ bb.0.entry:
+ liveins:
+ ; CHECK-LABEL: name: test_implicit_def
+ ; CHECK: [[LO:%[0-9]+]](s64) = G_IMPLICIT_DEF
+ ; CHECK: [[HI:%[0-9]+]](s64) = G_IMPLICIT_DEF
+ ; CHECK: %0(s128) = G_MERGE_VALUES [[LO]](s64), [[HI]](s64)
+
+ %0:_(s128) = G_IMPLICIT_DEF
+...
diff --git a/test/CodeGen/AArch64/GlobalISel/select-trunc.mir b/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
index 5559e2d3a0d1..f43a9ab34ffd 100644
--- a/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
+++ b/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
@@ -15,8 +15,8 @@ legalized: true
regBankSelected: true
# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gpr64, preferred-register: '' }
-# CHECK-NEXT: - { id: 1, class: gpr32, preferred-register: '' }
+# CHECK-NEXT: - { id: 0, class: gpr64sp, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr32sp, preferred-register: '' }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
diff --git a/test/CodeGen/AArch64/arm64-ccmp.ll b/test/CodeGen/AArch64/arm64-ccmp.ll
index 2682fa7dcce1..a910585e7f5d 100644
--- a/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -108,9 +108,9 @@ if.end: ; preds = %if.then, %lor.lhs.f
; CHECK: cmp w0, #1
; CHECK: sdiv [[DIVRES:w[0-9]+]], w1, w0
; CHECK: ccmp [[DIVRES]], #16, #0, ge
-; CHECK: b.gt [[BLOCK:LBB[0-9_]+]]
-; CHECK: bl _foo
+; CHECK: b.le [[BLOCK:LBB[0-9_]+]]
; CHECK: [[BLOCK]]:
+; CHECK: bl _foo
; CHECK: orr w0, wzr, #0x7
define i32 @speculate_division(i32 %a, i32 %b) nounwind ssp {
entry:
@@ -135,7 +135,7 @@ if.end:
; CHECK: cmp
; CHECK-NOT: b.
; CHECK: fccmp {{.*}}, #8, ge
-; CHECK: b.lt
+; CHECK: b.ge
define i32 @single_fcmp(i32 %a, float %b) nounwind ssp {
entry:
%cmp = icmp sgt i32 %a, 0
diff --git a/test/CodeGen/AArch64/arm64-spill-remarks.ll b/test/CodeGen/AArch64/arm64-spill-remarks.ll
index bc9340352d75..cfebeb496e18 100644
--- a/test/CodeGen/AArch64/arm64-spill-remarks.ll
+++ b/test/CodeGen/AArch64/arm64-spill-remarks.ll
@@ -3,6 +3,15 @@
; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple 2>&1 | FileCheck -check-prefix=NO_REMARK %s
; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-output=%t.yaml -pass-remarks-with-hotness 2>&1 | FileCheck -check-prefix=NO_REMARK %s
; RUN: cat %t.yaml | FileCheck -check-prefix=YAML %s
+;
+; Verify that remarks below the hotness threshold are not output.
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-missed=regalloc \
+; RUN: -pass-remarks-with-hotness -pass-remarks-hotness-threshold=500 \
+; RUN: 2>&1 | FileCheck -check-prefix=THRESHOLD %s
+; RUN: llc < %s -mtriple=arm64-apple-ios7.0 -aarch64-neon-syntax=apple -pass-remarks-output=%t.threshold.yaml \
+; RUN: -pass-remarks-with-hotness -pass-remarks-hotness-threshold=500 \
+; RUN: 2>&1 | FileCheck -check-prefix=NO_REMARK %s
+; RUN: cat %t.threshold.yaml | FileCheck -check-prefix=THRESHOLD_YAML %s
; This has two nested loops, each with one value that has to be spilled and
; then reloaded.
@@ -23,6 +32,9 @@
; NO_REMARK-NOT: remark
+; THRESHOLD-NOT: (hotness: 300)
+; THRESHOLD: remark: /tmp/kk.c:2:20: 1 spills 1 reloads generated in loop (hotness: 30000)
+
; YAML: --- !Missed
; YAML: Pass: regalloc
; YAML: Name: LoopSpillReload
@@ -63,6 +75,21 @@
; YAML: - String: generated in loop
; YAML: ...
+; THRESHOLD_YAML-NOT: Hotness: 300{{$}}
+; THRESHOLD_YAML: --- !Missed
+; THRESHOLD_YAML: Pass: regalloc
+; THRESHOLD_YAML: Name: LoopSpillReload
+; THRESHOLD_YAML: DebugLoc: { File: /tmp/kk.c, Line: 2, Column: 20 }
+; THRESHOLD_YAML: Function: fpr128
+; THRESHOLD_YAML: Hotness: 30000
+; THRESHOLD_YAML: Args:
+; THRESHOLD_YAML: - NumSpills: '1'
+; THRESHOLD_YAML: - String: ' spills '
+; THRESHOLD_YAML: - NumReloads: '1'
+; THRESHOLD_YAML: - String: ' reloads '
+; THRESHOLD_YAML: - String: generated in loop
+; THRESHOLD_YAML: ...
+
define void @fpr128(<4 x float>* %p) nounwind ssp !prof !11 {
entry:
br label %loop, !dbg !8
diff --git a/test/CodeGen/AArch64/ccmp-successor-probs.mir b/test/CodeGen/AArch64/ccmp-successor-probs.mir
new file mode 100644
index 000000000000..8e81c419841b
--- /dev/null
+++ b/test/CodeGen/AArch64/ccmp-successor-probs.mir
@@ -0,0 +1,46 @@
+# RUN: llc -o - %s -mtriple=aarch64--linux-gnu -mcpu=falkor -run-pass=aarch64-ccmp | FileCheck %s
+---
+# This test checks that successor probabilities are properly updated after a
+# ccmp-conversion.
+#
+# CHECK-LABEL: name: aarch64-ccmp-successor-probs
+# CHECK: bb.0:
+# CHECK-NEXT: successors: %bb.2(0x04000000), %bb.3(0x7c000000)
+# CHECK: CCMPXr %5, %4, 0, 10, implicit-def %nzcv, implicit %nzcv
+#
+name: aarch64-ccmp-successor-probs
+registers:
+ - { id: 0, class: gpr64 }
+ - { id: 1, class: gpr64 }
+ - { id: 2, class: gpr64 }
+ - { id: 3, class: gpr64 }
+ - { id: 4, class: gpr64 }
+ - { id: 5, class: gpr64 }
+ - { id: 6, class: gpr64 }
+ - { id: 7, class: gpr64 }
+body : |
+ bb.0:
+ successors: %bb.1(0x7e000000), %bb.2(0x02000000)
+
+ %0 = LDRXui killed %x0, 69
+ %1 = COPY %xzr
+ %2 = SUBSXrr %1, %0, implicit-def dead %nzcv
+ %3 = SUBSXri %x1, 1, 0, implicit-def dead %nzcv
+ %4 = COPY %0
+ %5 = COPY %3
+ %6 = SUBSXrr %x1, killed %2, implicit-def %nzcv
+ Bcc 11, %bb.2, implicit %nzcv
+ B %bb.1
+
+ bb.1:
+ successors: %bb.2(0x02082082), %bb.3(0x7df7df7e)
+
+ %7 = SUBSXrr %5, %4, implicit-def %nzcv
+ Bcc 12, %bb.2, implicit %nzcv
+ B %bb.3
+
+ bb.2:
+ successors: %bb.3(0x80000000)
+
+ bb.3:
+...
diff --git a/test/CodeGen/AArch64/cond-br-tuning.ll b/test/CodeGen/AArch64/cond-br-tuning.ll
index 628d89e34a01..d966acbebfdd 100644
--- a/test/CodeGen/AArch64/cond-br-tuning.ll
+++ b/test/CodeGen/AArch64/cond-br-tuning.ll
@@ -83,7 +83,7 @@ L2:
; CHECK-LABEL: test_add_tbz:
; CHECK: adds
-; CHECK: b.ge
+; CHECK: b.pl
; CHECK: ret
define void @test_add_tbz(i32 %a, i32 %b, i32* %ptr) {
entry:
@@ -99,7 +99,7 @@ L2:
; CHECK-LABEL: test_subs_tbz:
; CHECK: subs
-; CHECK: b.ge
+; CHECK: b.pl
; CHECK: ret
define void @test_subs_tbz(i32 %a, i32 %b, i32* %ptr) {
entry:
@@ -115,7 +115,7 @@ L2:
; CHECK-LABEL: test_add_tbnz
; CHECK: adds
-; CHECK: b.lt
+; CHECK: b.mi
; CHECK: ret
define void @test_add_tbnz(i32 %a, i32 %b, i32* %ptr) {
entry:
@@ -131,7 +131,7 @@ L2:
; CHECK-LABEL: test_subs_tbnz
; CHECK: subs
-; CHECK: b.lt
+; CHECK: b.mi
; CHECK: ret
define void @test_subs_tbnz(i32 %a, i32 %b, i32* %ptr) {
entry:
diff --git a/test/CodeGen/AMDGPU/alignbit-pat.ll b/test/CodeGen/AMDGPU/alignbit-pat.ll
new file mode 100644
index 000000000000..ff5c8960fad3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/alignbit-pat.ll
@@ -0,0 +1,100 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}alignbit_shr_pat:
+; GCN-DAG: s_load_dword s[[SHR:[0-9]+]]
+; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], s[[SHR]]
+
+define amdgpu_kernel void @alignbit_shr_pat(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 31
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_v:
+; GCN-DAG: load_dword v[[SHR:[0-9]+]],
+; GCN-DAG: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], v[[SHR]]
+
+define amdgpu_kernel void @alignbit_shr_pat_v(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
+bb:
+ %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep1 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
+ %tmp = load i64, i64 addrspace(1)* %gep1, align 8
+ %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i32 %tid
+ %amt = load i32, i32 addrspace(1)* %gep2, align 4
+ %tmp3 = and i32 %amt, 31
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %gep2, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and30:
+; Negative test, wrong constant
+; GCN: v_lshr_b64
+; GCN-NOT: v_alignbit_b32
+
+define amdgpu_kernel void @alignbit_shr_pat_wrong_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 30
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_and63:
+; Negative test, wrong constant
+; GCN: v_lshr_b64
+; GCN-NOT: v_alignbit_b32
+
+define amdgpu_kernel void @alignbit_shr_pat_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 63
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = lshr i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_const30:
+; GCN: load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v{{[0-9]+}}, v[[HI]], v[[LO]], 30
+
+define amdgpu_kernel void @alignbit_shr_pat_const30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp5 = lshr i64 %tmp, 30
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}alignbit_shr_pat_wrong_const33:
+; Negative test, shift amount more than 31
+; GCN: v_lshrrev_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
+; GCN-NOT: v_alignbit_b32
+
+define amdgpu_kernel void @alignbit_shr_pat_wrong_const33(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp5 = lshr i64 %tmp, 33
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/bug-vopc-commute.ll b/test/CodeGen/AMDGPU/bug-vopc-commute.ll
index 7c02d8385462..e951b5e08927 100644
--- a/test/CodeGen/AMDGPU/bug-vopc-commute.ll
+++ b/test/CodeGen/AMDGPU/bug-vopc-commute.ll
@@ -8,8 +8,8 @@
; of which were in SGPRs.
define amdgpu_vs float @main(i32 %v) {
main_body:
- %d1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 960)
- %d2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 976)
+ %d1 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 960)
+ %d2 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 976)
br i1 undef, label %ENDIF56, label %IF57
IF57: ; preds = %ENDIF
@@ -41,7 +41,7 @@ ENDIF62: ; preds = %ENDIF59
}
; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #0
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #0
attributes #0 = { nounwind readnone }
attributes #1 = { readnone }
diff --git a/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
index 53adf09026ec..04ad3bcccd3f 100644
--- a/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
+++ b/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
@@ -176,14 +176,13 @@ ret:
; OPT: ret
; GCN-LABEL: {{^}}sink_ubfe_i64_span_midpoint:
-; GCN: s_cbranch_scc1 BB3_2
-; GCN: s_lshr_b64 s{{\[}}[[LO:[0-9]+]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, 30
-; GCN: s_and_b32 s{{[0-9]+}}, s[[LO]], 0xff
+; GCN: v_alignbit_b32 v[[LO:[0-9]+]], s{{[0-9]+}}, v{{[0-9]+}}, 30
+; GCN: s_cbranch_scc1 BB3_2
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]]
; GCN: BB3_2:
-; GCN: s_lshr_b64 s{{\[}}[[LO:[0-9]+]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, 30
-; GCN: s_and_b32 s{{[0-9]+}}, s[[LO]], 0x7f
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x7f, v[[LO]]
; GCN: BB3_3:
; GCN: buffer_store_dwordx2
diff --git a/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
index a68ddabd9560..37fd08242fba 100644
--- a/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
+++ b/test/CodeGen/AMDGPU/code-object-metadata-from-llvm-ir-full.ll
@@ -16,7 +16,9 @@
; CHECK: ---
; CHECK: Version: [ 1, 0 ]
-; CHECK: Printf: [ '1:1:4:%d\n', '2:1:8:%g\n' ]
+; CHECK: Printf:
+; CHECK: - '1:1:4:%d\n'
+; CHECK: - '2:1:8:%g\n'
; CHECK: Kernels:
; CHECK: - Name: test_char
@@ -1253,8 +1255,8 @@ define amdgpu_kernel void @test_pointee_align(i64 addrspace(1)* %a,
; NOTES-NEXT: Owner Data size Description
; NOTES-NEXT: AMD 0x00000008 Unknown note type: (0x00000001)
; NOTES-NEXT: AMD 0x0000001b Unknown note type: (0x00000003)
-; GFX700: AMD 0x00008b06 Unknown note type: (0x0000000a)
-; GFX800: AMD 0x00008e6a Unknown note type: (0x0000000a)
-; GFX900: AMD 0x00008b06 Unknown note type: (0x0000000a)
+; GFX700: AMD 0x00008b0a Unknown note type: (0x0000000a)
+; GFX800: AMD 0x00008e6e Unknown note type: (0x0000000a)
+; GFX900: AMD 0x00008b0a Unknown note type: (0x0000000a)
; PARSER: AMDGPU Code Object Metadata Parser Test: PASS
diff --git a/test/CodeGen/AMDGPU/combine-and-sext-bool.ll b/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
new file mode 100644
index 000000000000..cd4ac4d58ad3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
@@ -0,0 +1,27 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}and_i1_sext_bool:
+; GCN: v_cmp_{{gt|le}}_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e{{32|64}} [[VAL:v[0-9]+]], 0, v{{[0-9]+}}, [[CC]]
+; GCN: store_dword {{.*}}[[VAL]]
+; GCN-NOT: v_cndmask_b32_e64 v{{[0-9]+}}, {{0|-1}}, {{0|-1}}
+; GCN-NOT: v_and_b32_e32
+
+define amdgpu_kernel void @and_i1_sext_bool(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %and = and i32 %v, %ext
+ store i32 %and, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+declare i32 @llvm.amdgcn.workitem.id.y() #0
+
+attributes #0 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/combine-cond-add-sub.ll b/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
index 187fb24dfb66..9e47c7d3449c 100644
--- a/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
+++ b/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
@@ -150,6 +150,26 @@ bb:
ret void
}
+; GCN-LABEL: {{^}}add_and:
+; GCN: s_and_b64 [[CC:[^,]+]],
+; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
+; GCN-NOT: v_cndmask
+
+define amdgpu_kernel void @add_and(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
+ %v = load i32, i32 addrspace(1)* %gep, align 4
+ %cmp1 = icmp ugt i32 %x, %y
+ %cmp2 = icmp ugt i32 %x, 1
+ %cmp = and i1 %cmp1, %cmp2
+ %ext = zext i1 %cmp to i32
+ %add = add i32 %v, %ext
+ store i32 %add, i32 addrspace(1)* %gep, align 4
+ ret void
+}
+
declare i1 @llvm.amdgcn.class.f32(float, i32) #0
declare i32 @llvm.amdgcn.workitem.id.x() #0
diff --git a/test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll b/test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll
new file mode 100644
index 000000000000..3637722d004d
--- /dev/null
+++ b/test/CodeGen/AMDGPU/fold-fmul-to-neg-abs.ll
@@ -0,0 +1,37 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}fold_mul_neg:
+; GCN: load_dword [[V:v[0-9]+]]
+; GCN: v_or_b32_e32 [[NEG:v[0-9]]], 0x80000000, [[V]]
+; GCN: store_dword [[NEG]]
+
+define amdgpu_kernel void @fold_mul_neg(float addrspace(1)* %arg) {
+ %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tid
+ %v = load float, float addrspace(1)* %gep, align 4
+ %cmp = fcmp fast ogt float %v, 0.000000e+00
+ %sel = select i1 %cmp, float -1.000000e+00, float 1.000000e+00
+ %mul = fmul fast float %v, %sel
+ store float %mul, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}fold_mul_abs:
+; GCN: load_dword [[V:v[0-9]+]]
+; GCN: v_and_b32_e32 [[ABS:v[0-9]]], 0x7fffffff, [[V]]
+; GCN: store_dword [[ABS]]
+
+define amdgpu_kernel void @fold_mul_abs(float addrspace(1)* %arg) {
+ %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tid
+ %v = load float, float addrspace(1)* %gep, align 4
+ %cmp = fcmp fast olt float %v, 0.000000e+00
+ %sel = select i1 %cmp, float -1.000000e+00, float 1.000000e+00
+ %mul = fmul fast float %v, %sel
+ store float %mul, float addrspace(1)* %gep, align 4
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll b/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
index 51f564d96909..564d2b32964f 100644
--- a/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
+++ b/test/CodeGen/AMDGPU/llvm.SI.load.dword.ll
@@ -14,24 +14,24 @@
; CHECK: s_movk_i32 [[K:s[0-9]+]], 0x4d2 ; encoding
; CHECK: buffer_load_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, [[K]] idxen offen offset:65535 glc slc
-define amdgpu_vs void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <16 x i8>] addrspace(2)* byval %arg3, [17 x <16 x i8>] addrspace(2)* inreg %arg4, [17 x <16 x i8>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) {
+define amdgpu_vs void @main([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <4 x i32>] addrspace(2)* byval %arg3, [17 x <4 x i32>] addrspace(2)* inreg %arg4, [17 x <4 x i32>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) {
main_body:
- %tmp = getelementptr [2 x <16 x i8>], [2 x <16 x i8>] addrspace(2)* %arg3, i64 0, i32 1
- %tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
+ %tmp = getelementptr [2 x <4 x i32>], [2 x <4 x i32>] addrspace(2)* %arg3, i64 0, i32 1
+ %tmp10 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
%tmp11 = shl i32 %arg6, 2
- %tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
+ %tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
%tmp13 = bitcast i32 %tmp12 to float
- %tmp14 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0)
+ %tmp14 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 %tmp11, i32 0, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0)
%tmp15 = bitcast i32 %tmp14 to float
- %tmp16 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 0)
+ %tmp16 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 %tmp11, i32 0, i32 0, i32 0, i32 1, i32 1, i32 1, i32 0)
%tmp17 = bitcast i32 %tmp16 to float
- %tmp18 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp18 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 0, i32 1, i32 1, i32 1, i32 1, i32 0)
%tmp19 = bitcast i32 %tmp18 to float
- %tmp20 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 123, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp20 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer, i32 0, i32 123, i32 1, i32 1, i32 1, i32 1, i32 0)
%tmp21 = bitcast i32 %tmp20 to float
- %tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer, i32 1234, i32 65535, i32 1, i32 1, i32 1, i32 1, i32 0)
+ %tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer, i32 1234, i32 65535, i32 1, i32 1, i32 1, i32 1, i32 0)
%tmp23 = bitcast i32 %tmp22 to float
call void @llvm.amdgcn.exp.f32(i32 15, i32 12, float %tmp13, float %tmp15, float %tmp17, float %tmp19, i1 false, i1 false)
@@ -40,10 +40,10 @@ main_body:
}
; Function Attrs: nounwind readonly
-declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #0
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
; Function Attrs: nounwind readonly
-declare i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32) #0
+declare i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32) #0
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #1
diff --git a/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll b/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll
index cd9c082ed941..01b76422c03f 100644
--- a/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll
+++ b/test/CodeGen/AMDGPU/llvm.SI.tbuffer.store.ll
@@ -5,7 +5,7 @@
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offen offset:32 glc slc
define amdgpu_vs void @test1(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
@@ -15,7 +15,7 @@ define amdgpu_vs void @test1(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 idxen offset:32 glc slc
define amdgpu_vs void @test1_idx(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 0, i32 1, i32 1,
i32 1, i32 0)
ret void
@@ -25,7 +25,7 @@ define amdgpu_vs void @test1_idx(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, {{s[0-9]+}} idxen offset:32 glc slc
define amdgpu_vs void @test1_scalar_offset(i32 %a1, i32 %vaddr, i32 inreg %soffset) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 %soffset, i32 32, i32 14, i32 4, i32 0, i32 1, i32 1,
i32 1, i32 0)
ret void
@@ -35,7 +35,7 @@ define amdgpu_vs void @test1_scalar_offset(i32 %a1, i32 %vaddr, i32 inreg %soffs
;CHECK: tbuffer_store_format_xyzw {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:14, nfmt:4, 0 offen offset:32
define amdgpu_vs void @test1_no_glc_slc(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 4, i32 %vaddr, i32 0, i32 32, i32 14, i32 4, i32 1, i32 0, i32 0,
i32 0, i32 0)
ret void
@@ -45,7 +45,7 @@ define amdgpu_vs void @test1_no_glc_slc(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xyz {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:13, nfmt:4, 0 offen offset:24 glc slc
define amdgpu_vs void @test2(i32 %a1, i32 %vaddr) {
%vdata = insertelement <4 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v4i32(<16 x i8> undef, <4 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v4i32(<4 x i32> undef, <4 x i32> %vdata,
i32 3, i32 %vaddr, i32 0, i32 24, i32 13, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
@@ -55,7 +55,7 @@ define amdgpu_vs void @test2(i32 %a1, i32 %vaddr) {
;CHECK: tbuffer_store_format_xy {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:11, nfmt:4, 0 offen offset:16 glc slc
define amdgpu_vs void @test3(i32 %a1, i32 %vaddr) {
%vdata = insertelement <2 x i32> undef, i32 %a1, i32 0
- call void @llvm.SI.tbuffer.store.v2i32(<16 x i8> undef, <2 x i32> %vdata,
+ call void @llvm.SI.tbuffer.store.v2i32(<4 x i32> undef, <2 x i32> %vdata,
i32 2, i32 %vaddr, i32 0, i32 16, i32 11, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
@@ -64,12 +64,12 @@ define amdgpu_vs void @test3(i32 %a1, i32 %vaddr) {
;CHECK-LABEL: {{^}}test4:
;CHECK: tbuffer_store_format_x {{v[0-9]+}}, {{v[0-9]+}}, {{s\[[0-9]+:[0-9]+\]}}, dfmt:4, nfmt:4, 0 offen offset:8 glc slc
define amdgpu_vs void @test4(i32 %vdata, i32 %vaddr) {
- call void @llvm.SI.tbuffer.store.i32(<16 x i8> undef, i32 %vdata,
+ call void @llvm.SI.tbuffer.store.i32(<4 x i32> undef, i32 %vdata,
i32 1, i32 %vaddr, i32 0, i32 8, i32 4, i32 4, i32 1, i32 0, i32 1,
i32 1, i32 0)
ret void
}
-declare void @llvm.SI.tbuffer.store.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
-declare void @llvm.SI.tbuffer.store.v2i32(<16 x i8>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
-declare void @llvm.SI.tbuffer.store.v4i32(<16 x i8>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+declare void @llvm.SI.tbuffer.store.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+declare void @llvm.SI.tbuffer.store.v2i32(<4 x i32>, <2 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+declare void @llvm.SI.tbuffer.store.v4i32(<4 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
diff --git a/test/CodeGen/AMDGPU/misched-killflags.mir b/test/CodeGen/AMDGPU/misched-killflags.mir
new file mode 100644
index 000000000000..ac3a25e5e4b3
--- /dev/null
+++ b/test/CodeGen/AMDGPU/misched-killflags.mir
@@ -0,0 +1,45 @@
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -run-pass=post-RA-sched -o - %s | FileCheck %s
+# Make sure ScheduleDAGInstrs::fixupKills does not produce invalid kill flags.
+---
+name: func0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3
+
+ %sgpr33 = S_MOV_B32 %sgpr7
+ %sgpr32 = S_MOV_B32 %sgpr33
+ %sgpr10 = S_MOV_B32 5
+ %sgpr9 = S_MOV_B32 4
+ %sgpr8 = S_MOV_B32 3
+ BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
+ %sgpr6_sgpr7 = S_GETPC_B64
+ %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
+ %sgpr7 = S_ADDC_U32 internal %sgpr7,0, implicit-def %scc, implicit internal %scc
+ }
+ %sgpr4 = S_MOV_B32 %sgpr33
+ %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+ %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+ %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+ %vgpr3 = V_MOV_B32_e32 %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
+ S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+ S_ENDPGM
+...
+# CHECK-LABEL: name: func0
+# CHECK: %sgpr10 = S_MOV_B32 5
+# CHECK: %sgpr9 = S_MOV_B32 4
+# CHECK: %sgpr8 = S_MOV_B32 3
+# CHECK: %sgpr33 = S_MOV_B32 killed %sgpr7
+# CHECK: %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
+# CHECK: %sgpr6_sgpr7 = S_GETPC_B64
+# CHECK: %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
+# CHECK: %sgpr7 = S_ADDC_U32 internal %sgpr7, 0, implicit-def %scc, implicit internal %scc
+# CHECK: }
+# CHECK: %sgpr4 = S_MOV_B32 %sgpr33
+# CHECK: %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: %vgpr3 = V_MOV_B32_e32 killed %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
+# CHECK: %sgpr32 = S_MOV_B32 killed %sgpr33
+# CHECK: S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+# CHECK: S_ENDPGM
diff --git a/test/CodeGen/AMDGPU/mubuf.ll b/test/CodeGen/AMDGPU/mubuf.ll
index d883b87ec401..b23b21118aaa 100644
--- a/test/CodeGen/AMDGPU/mubuf.ll
+++ b/test/CodeGen/AMDGPU/mubuf.ll
@@ -55,14 +55,14 @@ entry:
; CHECK-LABEL: {{^}}soffset_max_imm:
; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 64 offen glc
-define amdgpu_gs void @soffset_max_imm([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
+define amdgpu_gs void @soffset_max_imm([6 x <4 x i32>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
- %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
- %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0
+ %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(2)* %0, i32 0, i32 0
+ %tmp1 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp0
%tmp2 = shl i32 %6, 2
- %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 64, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp1, i32 %tmp2, i32 64, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
%tmp4 = add i32 %6, 16
- %tmp1.4xi32 = bitcast <16 x i8> %tmp1 to <4 x i32>
+ %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 0, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i1 1, i1 1)
ret void
}
@@ -74,14 +74,14 @@ main_body:
; CHECK-LABEL: {{^}}soffset_no_fold:
; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x41
; CHECK: buffer_load_dword v{{[0-9+]}}, v{{[0-9+]}}, s[{{[0-9]+}}:{{[0-9]+}}], [[SOFFSET]] offen glc
-define amdgpu_gs void @soffset_no_fold([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
+define amdgpu_gs void @soffset_no_fold([6 x <4 x i32>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) {
main_body:
- %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
- %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0
+ %tmp0 = getelementptr [6 x <4 x i32>], [6 x <4 x i32>] addrspace(2)* %0, i32 0, i32 0
+ %tmp1 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp0
%tmp2 = shl i32 %6, 2
- %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 65, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp1, i32 %tmp2, i32 65, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
%tmp4 = add i32 %6, 16
- %tmp1.4xi32 = bitcast <16 x i8> %tmp1 to <4 x i32>
+ %tmp1.4xi32 = bitcast <4 x i32> %tmp1 to <4 x i32>
call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> %tmp1.4xi32, i32 0, i32 %tmp4, i32 %4, i32 0, i32 4, i32 4, i1 1, i1 1)
ret void
}
@@ -176,7 +176,7 @@ define amdgpu_kernel void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
ret void
}
-declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #0
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #0
declare void @llvm.amdgcn.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1)
attributes #0 = { nounwind readonly }
diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir b/test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir
deleted file mode 100644
index 31024277871d..000000000000
--- a/test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir
+++ /dev/null
@@ -1,69 +0,0 @@
-# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=simple-register-coalescing,rename-independent-subregs -o - %s | FileCheck -check-prefix=GCN %s
----
-
-# GCN-LABEL: name: mac_invalid_operands
-# GCN: undef %18.sub0 = V_MAC_F32_e32 undef %3, undef %9, undef %18.sub0, implicit %exec
-
-name: mac_invalid_operands
-alignment: 0
-exposesReturnsTwice: false
-legalized: false
-regBankSelected: false
-selected: false
-tracksRegLiveness: true
-registers:
- - { id: 0, class: vreg_128 }
- - { id: 1, class: vreg_128 }
- - { id: 2, class: sgpr_64 }
- - { id: 3, class: vgpr_32 }
- - { id: 4, class: vgpr_32 }
- - { id: 5, class: vgpr_32 }
- - { id: 6, class: vgpr_32 }
- - { id: 7, class: sreg_64 }
- - { id: 8, class: vgpr_32 }
- - { id: 9, class: vgpr_32 }
- - { id: 10, class: vreg_64 }
- - { id: 11, class: vreg_64 }
- - { id: 12, class: vreg_128 }
- - { id: 13, class: vreg_128 }
- - { id: 14, class: vgpr_32 }
- - { id: 15, class: vreg_64 }
- - { id: 16, class: vgpr_32 }
- - { id: 17, class: vreg_128 }
-body: |
- bb.0:
- successors: %bb.2, %bb.1
-
- %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, 0, implicit %exec
- %vcc = COPY killed %7
- S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
-
- bb.1:
- successors: %bb.3
-
- %4 = V_ADD_F32_e32 undef %6, undef %5, implicit %exec
- undef %12.sub0 = COPY killed %4
- %17 = COPY killed %12
- S_BRANCH %bb.3
-
- bb.2:
- successors: %bb.3
-
- %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit %exec
- undef %13.sub0 = COPY %8
- %13.sub1 = COPY %8
- %13.sub2 = COPY killed %8
- %0 = COPY killed %13
- %17 = COPY killed %0
-
- bb.3:
- %1 = COPY killed %17
- FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit %exec, implicit %flat_scr
- %14 = COPY %1.sub1
- %16 = COPY killed %1.sub0
- undef %15.sub0 = COPY killed %16
- %15.sub1 = COPY killed %14
- FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit %exec, implicit %flat_scr
- S_ENDPGM
-
-...
diff --git a/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir b/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
new file mode 100644
index 000000000000..770bfaddb23e
--- /dev/null
+++ b/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
@@ -0,0 +1,155 @@
+# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass=simple-register-coalescing,rename-independent-subregs -o - %s | FileCheck -check-prefix=GCN %s
+---
+
+# GCN-LABEL: name: mac_invalid_operands
+# GCN: undef %18.sub0 = V_MAC_F32_e32 undef %3, undef %9, undef %18.sub0, implicit %exec
+
+name: mac_invalid_operands
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_128 }
+ - { id: 1, class: vreg_128 }
+ - { id: 2, class: sgpr_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: vgpr_32 }
+ - { id: 5, class: vgpr_32 }
+ - { id: 6, class: vgpr_32 }
+ - { id: 7, class: sreg_64 }
+ - { id: 8, class: vgpr_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vreg_64 }
+ - { id: 11, class: vreg_64 }
+ - { id: 12, class: vreg_128 }
+ - { id: 13, class: vreg_128 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vreg_64 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vreg_128 }
+body: |
+ bb.0:
+ successors: %bb.2, %bb.1
+
+ %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, 0, implicit %exec
+ %vcc = COPY killed %7
+ S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
+
+ bb.1:
+ successors: %bb.3
+
+ %4 = V_ADD_F32_e32 undef %6, undef %5, implicit %exec
+ undef %12.sub0 = COPY killed %4
+ %17 = COPY killed %12
+ S_BRANCH %bb.3
+
+ bb.2:
+ successors: %bb.3
+
+ %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit %exec
+ undef %13.sub0 = COPY %8
+ %13.sub1 = COPY %8
+ %13.sub2 = COPY killed %8
+ %0 = COPY killed %13
+ %17 = COPY killed %0
+
+ bb.3:
+ %1 = COPY killed %17
+ FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit %exec, implicit %flat_scr
+ %14 = COPY %1.sub1
+ %16 = COPY killed %1.sub0
+ undef %15.sub0 = COPY killed %16
+ %15.sub1 = COPY killed %14
+ FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+ S_ENDPGM
+
+...
+---
+# Make sure other uses after the mac are properly handled and not
+# left unreplaced due to iterator issues from substituteRegister.
+
+# GCN-LABEL: name: vreg_does_not_dominate
+
+# GCN: undef %8.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %8.sub1, implicit %exec
+# GCN: undef %7.sub0 = V_MOV_B32_e32 0, implicit %exec
+# GCN: undef %9.sub2 = COPY %7.sub0
+
+# GCN: undef %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit %exec
+# GCN: undef %7.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
+# GCN: %8.sub1 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit %exec
+
+# GCN: BUFFER_STORE_DWORD_OFFEN %6.sub3, %0,
+# GCN: BUFFER_STORE_DWORD_OFFEN %9.sub2, %0,
+# GCN: BUFFER_STORE_DWORD_OFFEN %8.sub1, %0,
+# GCN: BUFFER_STORE_DWORD_OFFEN %7.sub0, %0,
+name: vreg_does_not_dominate
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32, preferred-register: '' }
+ - { id: 1, class: vgpr_32, preferred-register: '' }
+ - { id: 2, class: vgpr_32, preferred-register: '' }
+ - { id: 3, class: vgpr_32, preferred-register: '' }
+ - { id: 4, class: vgpr_32, preferred-register: '' }
+ - { id: 5, class: sreg_64, preferred-register: '' }
+ - { id: 6, class: vreg_128, preferred-register: '' }
+liveins:
+ - { reg: '%vgpr0', virtual-reg: '%0' }
+ - { reg: '%sgpr30_sgpr31', virtual-reg: '%5' }
+body: |
+ bb.0:
+ successors: %bb.2, %bb.1
+ liveins: %vgpr0, %sgpr30_sgpr31, %sgpr5
+
+ %5 = COPY %sgpr30_sgpr31
+ %0 = COPY %vgpr0
+ undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit %exec
+ %6.sub0 = V_MOV_B32_e32 0, implicit %exec
+ %6.sub2 = COPY %6.sub0
+ S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.2
+
+ %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit %exec
+ %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
+ %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit %exec
+ %6.sub2 = COPY %6.sub0
+
+ bb.2:
+ BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 12, 0, 0, 0, implicit %exec
+ BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 8, 0, 0, 0, implicit %exec
+ BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 4, 0, 0, 0, implicit %exec
+ BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+ %sgpr30_sgpr31 = COPY %5
+ %sgpr5 = COPY %sgpr5
+ S_SETPC_B64_return %sgpr30_sgpr31, implicit %sgpr5
+
+...
+
+# GCN-LABEL: name: inf_loop_tied_operand
+# GCN: bb.0:
+# GCN-NEXT: undef %2.sub0 = V_MAC_F32_e32 1073741824, undef %0, undef %2.sub0, implicit %exec
+# GCN-NEXT: dead undef %3.sub1 = COPY %2.sub0
+
+name: inf_loop_tied_operand
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vgpr_32, preferred-register: '' }
+ - { id: 1, class: vgpr_32, preferred-register: '' }
+ - { id: 2, class: vreg_128, preferred-register: '' }
+body: |
+ bb.0:
+ %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit %exec
+ undef %2.sub0 = COPY %1
+ %2.sub1 = COPY %1
+
+...
diff --git a/test/CodeGen/AMDGPU/ret_jump.ll b/test/CodeGen/AMDGPU/ret_jump.ll
index e7a05d94cdc4..1acae60f3057 100644
--- a/test/CodeGen/AMDGPU/ret_jump.ll
+++ b/test/CodeGen/AMDGPU/ret_jump.ll
@@ -23,7 +23,7 @@
; GCN-NEXT: [[RET_BB]]:
; GCN-NEXT: ; return
; GCN-NEXT: .Lfunc_end0
-define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_trivial_ret_divergent_br_trivial_unreachable([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, i32 inreg %arg17, i32 %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
+define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_trivial_ret_divergent_br_trivial_unreachable([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, i32 inreg %arg17, i32 %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
entry:
%i.i = extractelement <2 x i32> %arg7, i32 0
%j.i = extractelement <2 x i32> %arg7, i32 1
@@ -75,7 +75,7 @@ ret.bb: ; preds = %else, %main_body
; GCN-NEXT: s_waitcnt
; GCN-NEXT: ; return
; GCN-NEXT: .Lfunc_end
-define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, i32 inreg %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
+define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, i32 inreg %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
main_body:
%i.i = extractelement <2 x i32> %arg7, i32 0
%j.i = extractelement <2 x i32> %arg7, i32 1
@@ -119,9 +119,6 @@ declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1
; Function Attrs: nounwind readnone
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-
-; Function Attrs: nounwind readnone
declare float @llvm.fabs.f32(float) #1
; Function Attrs: nounwind readnone
diff --git a/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll b/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll
index 47e32724d9ca..5edc2c5c9b71 100644
--- a/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll
+++ b/test/CodeGen/AMDGPU/scheduler-subrange-crash.ll
@@ -15,16 +15,16 @@ target triple = "amdgcn--"
define amdgpu_gs void @main(i32 inreg %arg) #0 {
main_body:
- %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 20)
- %tmp1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 24)
- %tmp2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 48)
+ %tmp = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 20)
+ %tmp1 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 24)
+ %tmp2 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 48)
%array_vector3 = insertelement <4 x float> zeroinitializer, float %tmp2, i32 3
%array_vector5 = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %tmp, i32 1
%array_vector6 = insertelement <4 x float> %array_vector5, float undef, i32 2
%array_vector9 = insertelement <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, float %tmp1, i32 1
%array_vector10 = insertelement <4 x float> %array_vector9, float 0.000000e+00, i32 2
%array_vector11 = insertelement <4 x float> %array_vector10, float undef, i32 3
- %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> undef, i32 undef, i32 4864, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
+ %tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> undef, i32 undef, i32 4864, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
call void @llvm.amdgcn.tbuffer.store.i32(i32 %tmp3, <4 x i32> undef, i32 0, i32 0, i32 %arg, i32 36, i32 4, i32 4, i1 1, i1 1)
%bc = bitcast <4 x float> %array_vector3 to <4 x i32>
%tmp4 = extractelement <4 x i32> %bc, i32 undef
@@ -45,8 +45,8 @@ main_body:
ret void
}
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
-declare i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8>, i32, i32, i32, i32, i32, i32, i32, i32) #2
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
+declare i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #2
declare void @llvm.amdgcn.tbuffer.store.i32(i32, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1) #3
attributes #0 = { nounwind "target-cpu"="tonga" }
diff --git a/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir b/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
new file mode 100644
index 000000000000..4f5c582f8b58
--- /dev/null
+++ b/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -0,0 +1,446 @@
+# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=si-peephole-sdwa -verify-machineinstrs -o - %s | FileCheck -check-prefix=VI -check-prefix=GFX89 -check-prefix=GCN %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass=si-peephole-sdwa -verify-machineinstrs -o - %s | FileCheck -check-prefix=GFX9 -check-prefix=GFX89 -check-prefix=GCN %s
+
+# GFX89-LABEL: {{^}}name: vop1_instructions
+
+# GFX89: %{{[0-9]+}} = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+
+
+# GFX89: %{{[0-9]+}} = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX89: %{{[0-9]+}} = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+
+
+# VI: %{{[0-9]+}} = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit %exec
+
+
+---
+name: vop1_instructions
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_64 }
+ - { id: 1, class: vreg_64 }
+ - { id: 2, class: sreg_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_32_xm0 }
+ - { id: 5, class: sreg_32_xm0 }
+ - { id: 6, class: sreg_32_xm0 }
+ - { id: 7, class: sreg_32_xm0 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vgpr_32 }
+ - { id: 11, class: vgpr_32 }
+ - { id: 12, class: vgpr_32 }
+ - { id: 13, class: vgpr_32 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vgpr_32 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vgpr_32 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vgpr_32 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vgpr_32 }
+ - { id: 29, class: vgpr_32 }
+ - { id: 30, class: vgpr_32 }
+ - { id: 31, class: vgpr_32 }
+ - { id: 32, class: vgpr_32 }
+ - { id: 33, class: vgpr_32 }
+ - { id: 34, class: vgpr_32 }
+ - { id: 35, class: vgpr_32 }
+ - { id: 36, class: vgpr_32 }
+ - { id: 37, class: vgpr_32 }
+ - { id: 38, class: vgpr_32 }
+ - { id: 39, class: vgpr_32 }
+ - { id: 40, class: vgpr_32 }
+ - { id: 41, class: vgpr_32 }
+ - { id: 42, class: vgpr_32 }
+ - { id: 43, class: vgpr_32 }
+ - { id: 44, class: vgpr_32 }
+ - { id: 45, class: vgpr_32 }
+ - { id: 46, class: vgpr_32 }
+ - { id: 47, class: vgpr_32 }
+ - { id: 48, class: vgpr_32 }
+ - { id: 100, class: vgpr_32 }
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+
+ %2 = COPY %sgpr30_sgpr31
+ %1 = COPY %vgpr2_vgpr3
+ %0 = COPY %vgpr0_vgpr1
+ %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+
+ %5 = S_MOV_B32 65535
+ %6 = S_MOV_B32 65535
+
+ %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
+ %11 = V_MOV_B32_e32 %10, implicit %exec
+ %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
+ %14 = V_FRACT_F32_e32 123, implicit %exec
+ %15 = V_LSHLREV_B32_e64 16, %14, implicit %exec
+ %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
+ %17 = V_SIN_F32_e32 %16, implicit %exec
+ %18 = V_LSHLREV_B32_e64 16, %17, implicit %exec
+ %19 = V_LSHRREV_B32_e64 16, %18, implicit %exec
+ %20 = V_CVT_U32_F32_e32 %19, implicit %exec
+ %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
+ %23 = V_CVT_F32_I32_e32 123, implicit %exec
+ %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
+
+ %25 = V_LSHRREV_B32_e64 16, %3, implicit %exec
+ %26 = V_MOV_B32_e64 %25, implicit %exec
+ %26 = V_LSHLREV_B32_e64 16, %26, implicit %exec
+ %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit %exec
+ %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
+ %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
+ %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit %exec
+ %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
+ %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
+ %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit %exec
+ %34 = V_LSHLREV_B32_e64 16, %33, implicit %exec
+ %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit %exec
+ %36 = V_LSHLREV_B32_e64 16, %35, implicit %exec
+
+
+ %37 = V_LSHRREV_B32_e64 16, %36, implicit %exec
+ %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit %exec
+ %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
+ %40 = V_LSHRREV_B32_e64 16, %39, implicit %exec
+ %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit %exec
+ %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
+ %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
+ %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit %exec
+ %45 = V_LSHLREV_B32_e64 16, %44, implicit %exec
+ %46 = V_LSHRREV_B32_e64 16, %45, implicit %exec
+ %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit %exec
+ %48 = V_LSHLREV_B32_e64 16, %47, implicit %exec
+
+
+ %100 = V_MOV_B32_e32 %48, implicit %exec
+
+ FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+ %sgpr30_sgpr31 = COPY %2
+ S_SETPC_B64_return %sgpr30_sgpr31
+
+...
+---
+# GCN-LABEL: {{^}}name: vop2_instructions
+
+
+# VI: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
+
+
+# VI: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
+# VI: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
+# GFX9: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
+
+
+# VI: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit %exec
+# VI: %{{[0-9]+}} = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+
+# GFX9: %{{[0-9]+}} = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+
+name: vop2_instructions
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_64 }
+ - { id: 1, class: vreg_64 }
+ - { id: 2, class: sreg_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_32_xm0 }
+ - { id: 5, class: sreg_32_xm0 }
+ - { id: 6, class: sreg_32_xm0 }
+ - { id: 7, class: sreg_32_xm0 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vgpr_32 }
+ - { id: 11, class: vgpr_32 }
+ - { id: 12, class: vgpr_32 }
+ - { id: 13, class: vgpr_32 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vgpr_32 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: vgpr_32 }
+ - { id: 19, class: vgpr_32 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vgpr_32 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 28, class: vgpr_32 }
+ - { id: 29, class: vgpr_32 }
+ - { id: 30, class: vgpr_32 }
+ - { id: 31, class: vgpr_32 }
+ - { id: 32, class: vgpr_32 }
+ - { id: 33, class: vgpr_32 }
+ - { id: 34, class: vgpr_32 }
+ - { id: 35, class: vgpr_32 }
+ - { id: 36, class: vgpr_32 }
+ - { id: 37, class: vgpr_32 }
+ - { id: 38, class: vgpr_32 }
+ - { id: 39, class: vgpr_32 }
+ - { id: 40, class: vgpr_32 }
+ - { id: 41, class: vgpr_32 }
+ - { id: 42, class: vgpr_32 }
+ - { id: 43, class: vgpr_32 }
+ - { id: 44, class: vgpr_32 }
+ - { id: 45, class: vgpr_32 }
+ - { id: 46, class: vgpr_32 }
+ - { id: 47, class: vgpr_32 }
+ - { id: 48, class: vgpr_32 }
+ - { id: 49, class: vgpr_32 }
+ - { id: 50, class: vgpr_32 }
+ - { id: 51, class: vgpr_32 }
+ - { id: 52, class: vgpr_32 }
+ - { id: 53, class: vgpr_32 }
+ - { id: 54, class: vgpr_32 }
+ - { id: 55, class: vgpr_32 }
+ - { id: 56, class: vgpr_32 }
+ - { id: 57, class: vgpr_32 }
+ - { id: 58, class: vgpr_32 }
+ - { id: 59, class: vgpr_32 }
+ - { id: 60, class: vgpr_32 }
+ - { id: 100, class: vgpr_32 }
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+
+ %2 = COPY %sgpr30_sgpr31
+ %1 = COPY %vgpr2_vgpr3
+ %0 = COPY %vgpr0_vgpr1
+ %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+
+ %5 = S_MOV_B32 65535
+ %6 = S_MOV_B32 65535
+
+ %11 = V_LSHRREV_B32_e64 16, %3, implicit %exec
+ %12 = V_AND_B32_e32 %6, %11, implicit %exec
+ %13 = V_LSHLREV_B32_e64 16, %12, implicit %exec
+ %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
+ %15 = V_BFE_U32 %13, 8, 8, implicit %exec
+ %16 = V_ADD_F32_e32 %14, %15, implicit %exec
+ %17 = V_LSHLREV_B32_e64 16, %16, implicit %exec
+ %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
+ %19 = V_BFE_U32 %17, 8, 8, implicit %exec
+ %20 = V_SUB_F16_e32 %18, %19, implicit %exec
+ %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
+ %22 = V_BFE_U32 %20, 8, 8, implicit %exec
+ %23 = V_MAC_F32_e32 %21, %22, %22, implicit %exec
+ %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
+ %25 = V_LSHRREV_B32_e64 16, %24, implicit %exec
+ %26 = V_BFE_U32 %24, 8, 8, implicit %exec
+ %27 = V_MAC_F16_e32 %25, %26, %26, implicit %exec
+ %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
+
+ %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
+ %30 = V_AND_B32_e64 23, %29, implicit %exec
+ %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
+ %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
+ %33 = V_BFE_U32 %31, 8, 8, implicit %exec
+ %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit %exec
+ %35 = V_LSHLREV_B32_e64 16, %34, implicit %exec
+ %37 = V_BFE_U32 %35, 8, 8, implicit %exec
+ %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit %exec
+ %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
+ %40 = V_BFE_U32 %39, 8, 8, implicit %exec
+ %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit %exec
+ %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
+ %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
+ %44 = V_BFE_U32 %42, 8, 8, implicit %exec
+ %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit %exec
+ %46 = V_LSHLREV_B32_e64 16, %45, implicit %exec
+
+ %47 = V_LSHRREV_B32_e64 16, %46, implicit %exec
+ %48 = V_BFE_U32 %46, 8, 8, implicit %exec
+ %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit %exec
+ %50 = V_LSHLREV_B32_e64 16, %49, implicit %exec
+ %51 = V_BFE_U32 %50, 8, 8, implicit %exec
+ %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit %exec
+ %53 = V_LSHLREV_B32_e64 16, %52, implicit %exec
+ %54 = V_BFE_U32 %53, 8, 8, implicit %exec
+ %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit %exec
+ %56 = V_LSHLREV_B32_e64 16, %55, implicit %exec
+ %57 = V_LSHRREV_B32_e64 16, %56, implicit %exec
+ %58 = V_BFE_U32 %56, 8, 8, implicit %exec
+ %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit %exec
+ %60 = V_LSHLREV_B32_e64 16, %59, implicit %exec
+
+ %100 = V_MOV_B32_e32 %60, implicit %exec
+
+ FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+ %sgpr30_sgpr31 = COPY %2
+ S_SETPC_B64_return %sgpr30_sgpr31
+
+...
+---
+
+# GCN-LABEL: {{^}}name: vopc_instructions
+
+# GFX89: %{{[0-9]+}} = V_MOV_B32_e32 123, implicit %exec
+# GFX89: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX89: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX89: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX89: %vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+
+
+# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# VI: %{{[0-9]+}} = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 0, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def %vcc, implicit %exec
+# VI: %{{[0-9]+}} = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def %exec, implicit %exec
+
+# GFX9: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MOV_B32_e32 23, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
+# GFX9: %{{[0-9]+}} = V_MOV_B32_e32 23, implicit %exec
+# GFX9: %{{[0-9]+}} = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+
+
+# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 2, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 2, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, 2, implicit-def %exec, implicit %exec
+
+# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 0, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, 2, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, 2, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, 2, implicit-def %exec, implicit %exec
+
+
+name: vopc_instructions
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vreg_64 }
+ - { id: 1, class: vreg_64 }
+ - { id: 2, class: sreg_64 }
+ - { id: 3, class: vgpr_32 }
+ - { id: 4, class: sreg_32_xm0 }
+ - { id: 5, class: sreg_32_xm0 }
+ - { id: 6, class: sreg_32_xm0 }
+ - { id: 7, class: sreg_32_xm0 }
+ - { id: 8, class: sreg_32 }
+ - { id: 9, class: vgpr_32 }
+ - { id: 10, class: vgpr_32 }
+ - { id: 11, class: vgpr_32 }
+ - { id: 12, class: vgpr_32 }
+ - { id: 13, class: vgpr_32 }
+ - { id: 14, class: vgpr_32 }
+ - { id: 15, class: vgpr_32 }
+ - { id: 16, class: vgpr_32 }
+ - { id: 17, class: vgpr_32 }
+ - { id: 18, class: sreg_64 }
+ - { id: 19, class: sreg_64 }
+ - { id: 20, class: vgpr_32 }
+ - { id: 21, class: vgpr_32 }
+ - { id: 22, class: vgpr_32 }
+ - { id: 23, class: vgpr_32 }
+ - { id: 24, class: vgpr_32 }
+ - { id: 25, class: vgpr_32 }
+ - { id: 26, class: vgpr_32 }
+ - { id: 27, class: vgpr_32 }
+ - { id: 100, class: vgpr_32 }
+body: |
+ bb.0:
+ liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+
+ %2 = COPY %sgpr30_sgpr31
+ %1 = COPY %vgpr2_vgpr3
+ %0 = COPY %vgpr0_vgpr1
+ %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+
+ %5 = S_MOV_B32 65535
+ %6 = S_MOV_B32 65535
+
+ %10 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMP_EQ_F32_e32 123, killed %10, implicit-def %vcc, implicit %exec
+ %11 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMPX_GT_F32_e32 123, killed %11, implicit-def %vcc, implicit-def %exec, implicit %exec
+ %12 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMP_LT_I32_e32 123, killed %12, implicit-def %vcc, implicit %exec
+ %13 = V_AND_B32_e64 %5, %3, implicit %exec
+ V_CMPX_EQ_I32_e32 123, killed %13, implicit-def %vcc, implicit-def %exec, implicit %exec
+
+ %14 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, 0, implicit %exec
+ %15 = V_AND_B32_e64 %5, %3, implicit %exec
+ %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, 0, implicit-def %exec, implicit %exec
+ %16 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit %exec
+ %17 = V_AND_B32_e64 %5, %3, implicit %exec
+ %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def %exec, implicit %exec
+
+ %20 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, 0, implicit %exec
+ %21 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, 2, implicit-def %exec, implicit %exec
+ %23 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, 2, implicit %exec
+ %24 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, 0, implicit-def %exec, implicit %exec
+ %25 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, 0, implicit-def %exec, implicit %exec
+ %26 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, 0, implicit-def %exec, implicit %exec
+ %27 = V_AND_B32_e64 %5, %3, implicit %exec
+ %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, 2, implicit-def %exec, implicit %exec
+
+
+ %100 = V_MOV_B32_e32 %vcc_lo, implicit %exec
+
+ FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+ %sgpr30_sgpr31 = COPY %2
+ S_SETPC_B64_return %sgpr30_sgpr31
diff --git a/test/CodeGen/AMDGPU/select-vectors.ll b/test/CodeGen/AMDGPU/select-vectors.ll
index 4b00a48211ec..ebbc675b2bab 100644
--- a/test/CodeGen/AMDGPU/select-vectors.ll
+++ b/test/CodeGen/AMDGPU/select-vectors.ll
@@ -66,7 +66,7 @@ define amdgpu_kernel void @v_select_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8
}
; GCN-LABEL: {{^}}select_v4i8:
-; GCN: v_cndmask_b32_e32
+; GCN: v_cndmask_b32
; GCN-NOT: cndmask
define amdgpu_kernel void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) #0 {
%cmp = icmp eq i8 %c, 0
diff --git a/test/CodeGen/AMDGPU/setcc-sext.ll b/test/CodeGen/AMDGPU/setcc-sext.ll
new file mode 100644
index 000000000000..eadce225e350
--- /dev/null
+++ b/test/CodeGen/AMDGPU/setcc-sext.ll
@@ -0,0 +1,292 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}setcc_sgt_true_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sgt_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp sgt i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_sgt_true_sext_swap:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sgt_true_sext_swap(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp slt i32 -1, %ext
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ne_true_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ne_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ne i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ult_true_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ult_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ult i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_eq_true_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_eq_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp eq i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_sle_true_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sle_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp sle i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_uge_true_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_uge_true_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp uge i32 %ext, -1
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_eq_false_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_eq_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp eq i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_sge_false_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_sge_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp sge i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ule_false_sext:
+; GCN: v_cmp_le_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ule_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ule i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+; GCN-LABEL: {{^}}setcc_ne_false_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ne_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ne i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+; GCN-LABEL: {{^}}setcc_ugt_false_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_ugt_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp ugt i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+; GCN-LABEL: {{^}}setcc_slt_false_sext:
+; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN-NEXT: s_and_saveexec_b64 {{[^,]+}}, [[CC]]
+; GCN-NOT: v_cndmask_
+
+define amdgpu_kernel void @setcc_slt_false_sext(i32 addrspace(1)* nocapture %arg) {
+bb:
+ %x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %y = tail call i32 @llvm.amdgcn.workitem.id.y()
+ %cmp = icmp ugt i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ %cond = icmp slt i32 %ext, 0
+ br i1 %cond, label %then, label %endif
+
+then:
+ store i32 1, i32 addrspace(1)* %arg, align 4
+ br label %endif
+
+endif:
+ ret void
+}
+
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+declare i32 @llvm.amdgcn.workitem.id.y() #0
+
+attributes #0 = { nounwind readnone speculatable }
diff --git a/test/CodeGen/AMDGPU/sgpr-copy.ll b/test/CodeGen/AMDGPU/sgpr-copy.ll
index 5c20e9a8d585..931051102cd5 100644
--- a/test/CodeGen/AMDGPU/sgpr-copy.ll
+++ b/test/CodeGen/AMDGPU/sgpr-copy.ll
@@ -4,13 +4,13 @@
; CHECK-LABEL: {{^}}phi1:
; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
; CHECK: v_mov_b32_e32 v{{[0-9]}}, [[DST]]
-define amdgpu_ps void @phi1(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @phi1(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 0)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 0)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 32)
%tmp24 = fptosi float %tmp22 to i32
%tmp25 = icmp ne i32 %tmp24, 0
br i1 %tmp25, label %ENDIF, label %ELSE
@@ -28,29 +28,29 @@ ENDIF: ; preds = %ELSE, %main_body
; Make sure this program doesn't crash
; CHECK-LABEL: {{^}}phi2:
-define amdgpu_ps void @phi2(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
+define amdgpu_ps void @phi2(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 32)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 36)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 40)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 48)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 52)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 56)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 64)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 68)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 72)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 76)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 80)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 84)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 88)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 92)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 32)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 36)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 40)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 48)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 52)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 56)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 64)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 68)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 72)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 76)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 80)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 84)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 88)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 92)
%tmp36 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %arg2, i32 0
%tmp37 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp36, !tbaa !0
- %tmp38 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg1, i32 0
- %tmp39 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp38, !tbaa !0
+ %tmp38 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg1, i32 0
+ %tmp39 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp38, !tbaa !0
%i.i = extractelement <2 x i32> %arg5, i32 0
%j.i = extractelement <2 x i32> %arg5, i32 1
%i.f.i = bitcast i32 %i.i to float
@@ -85,7 +85,7 @@ main_body:
%tmp46 = bitcast float %p2.i24 to i32
%tmp47 = insertelement <2 x i32> undef, i32 %tmp45, i32 0
%tmp48 = insertelement <2 x i32> %tmp47, i32 %tmp46, i32 1
- %tmp39.bc = bitcast <16 x i8> %tmp39 to <4 x i32>
+ %tmp39.bc = bitcast <4 x i32> %tmp39 to <4 x i32>
%a.bc.i = bitcast <2 x i32> %tmp48 to <2 x float>
%tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp37, <4 x i32> %tmp39.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp50 = extractelement <4 x float> %tmp1, i32 2
@@ -173,14 +173,14 @@ ENDIF24: ; preds = %IF25, %ENDIF
; We just want ot make sure the program doesn't crash
; CHECK-LABEL: {{^}}loop:
-define amdgpu_ps void @loop(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @loop(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 0)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 4)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 8)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 12)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 0)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 4)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 8)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 12)
%tmp25 = fptosi float %tmp24 to i32
%tmp26 = bitcast i32 %tmp25 to float
%tmp27 = bitcast float %tmp26 to i32
@@ -226,17 +226,17 @@ ENDIF: ; preds = %LOOP
; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[SAMPLE_LO]]:[[SAMPLE_HI]]{{\]}}
; CHECK: exp
; CHECK: s_endpgm
-define amdgpu_ps void @sample_v3([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @sample_v3([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
entry:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 16)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 16)
%tmp23 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp24 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp23, !tbaa !0
- %tmp25 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp26 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp25, !tbaa !0
+ %tmp25 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp26 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp25, !tbaa !0
%tmp27 = fcmp oeq float %tmp22, 0.000000e+00
- %tmp26.bc = bitcast <16 x i8> %tmp26 to <4 x i32>
+ %tmp26.bc = bitcast <4 x i32> %tmp26 to <4 x i32>
br i1 %tmp27, label %if, label %else
if: ; preds = %entry
@@ -290,7 +290,7 @@ endif: ; preds = %if1, %if0, %entry
; This test is just checking that we don't crash / assertion fail.
; CHECK-LABEL: {{^}}copy2:
; CHECK: s_endpgm
-define amdgpu_ps void @copy2([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @copy2([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
entry:
br label %LOOP68
@@ -326,11 +326,11 @@ ENDIF69: ; preds = %LOOP68
; [[END]]:
; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[ADD]]{{\]}}
; CHECK: s_endpgm
-define amdgpu_ps void @sample_rsrc([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
+define amdgpu_ps void @sample_rsrc([6 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
bb:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i32 0, i32 0
- %tmp22 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !3
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp22, i32 16)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg1, i32 0, i32 0
+ %tmp22 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !3
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp22, i32 16)
%tmp25 = getelementptr [32 x <8 x i32>], [32 x <8 x i32>] addrspace(2)* %arg3, i32 0, i32 0
%tmp26 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp25, !tbaa !3
%tmp27 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %arg2, i32 0, i32 0
@@ -420,7 +420,7 @@ declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1)
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
index a6026785b173..c70eb9b9c4a5 100644
--- a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
+++ b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
@@ -151,10 +151,11 @@ define amdgpu_kernel void @v_uextract_bit_1_31_i64(i64 addrspace(1)* %out, i64 a
ret void
}
-; Spans the dword boundary, so requires full shift
+; Spans the dword boundary, so requires full shift.
+; The result is truncated after the shift, so only the low half of the shift result is used.
; GCN-LABEL: {{^}}v_uextract_bit_31_32_i64:
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 31
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 31
; GCN-DAG: v_and_b32_e32 v[[AND:[0-9]+]], 3, v[[SHRLO]]{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO]]{{\]}}
@@ -188,8 +189,8 @@ define amdgpu_kernel void @v_uextract_bit_32_33_i64(i64 addrspace(1)* %out, i64
; GCN-LABEL: {{^}}v_uextract_bit_30_60_i64:
; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 30
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 30
; GCN-DAG: v_and_b32_e32 v[[AND:[0-9]+]], 0x3fffffff, v[[SHRLO]]{{$}}
; GCN-DAG: v_mov_b32_e32 v[[ZERO1:[0-9]+]], v[[ZERO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[AND]]:[[ZERO1]]{{\]}}
@@ -223,10 +224,9 @@ define amdgpu_kernel void @v_uextract_bit_33_63_i64(i64 addrspace(1)* %out, i64
; GCN-LABEL: {{^}}v_uextract_bit_31_63_i64:
; GCN: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 31
-; GCN-NEXT: v_mov_b32_e32 v[[SHRHI]], v[[ZERO]]
-; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[SHRHI]]{{\]}}
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 31
+; GCN: buffer_store_dwordx2 v{{\[}}[[SHRLO]]:[[ZERO]]{{\]}}
define amdgpu_kernel void @v_uextract_bit_31_63_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #1 {
%id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
@@ -288,8 +288,8 @@ define amdgpu_kernel void @v_uextract_bit_33_i64_trunc_i32(i32 addrspace(1)* %ou
}
; GCN-LABEL: {{^}}v_uextract_bit_31_32_i64_trunc_i32:
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 31
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_alignbit_b32 v[[SHRLO:[0-9]+]], v[[VALHI]], v[[VALLO]], 31
; GCN-NEXT: v_and_b32_e32 v[[SHRLO]], 3, v[[SHRLO]]
; GCN-NOT: v[[SHRLO]]
; GCN: buffer_store_dword v[[SHRLO]]
diff --git a/test/CodeGen/AMDGPU/shift-i64-opts.ll b/test/CodeGen/AMDGPU/shift-i64-opts.ll
index a803849be02c..5306e190a4f9 100644
--- a/test/CodeGen/AMDGPU/shift-i64-opts.ll
+++ b/test/CodeGen/AMDGPU/shift-i64-opts.ll
@@ -243,3 +243,77 @@ define amdgpu_kernel void @trunc_shl_31_i32_i64_multi_use(i32 addrspace(1)* %out
store volatile i64 %shl, i64 addrspace(1)* %in
ret void
}
+
+; GCN-LABEL: {{^}}trunc_shl_and31:
+; GCN: s_and_b32 s[[AMT:[0-9]+]], s{{[0-9]+}}, 31
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, s[[AMT]], v{{[0-9]+}}
+; GCN-NOT: v_lshl_b64
+; GCN-NOT: v_lshlrev_b64
+define amdgpu_kernel void @trunc_shl_and31(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 31
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_shl_and30:
+; GCN: s_and_b32 s[[AMT:[0-9]+]], s{{[0-9]+}}, 30
+; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, s[[AMT]], v{{[0-9]+}}
+; GCN-NOT: v_lshl_b64
+; GCN-NOT: v_lshlrev_b64
+define amdgpu_kernel void @trunc_shl_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 30
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_shl_wrong_and63:
+; Negative test: a mask of 63 still allows shift amounts >= 32, so the full 64-bit shift is required.
+; GCN: v_lshl_b64
+define amdgpu_kernel void @trunc_shl_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp3 = and i32 %arg2, 63
+ %tmp4 = zext i32 %tmp3 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_shl_no_and:
+; Negative test: with no mask the shift amount can be anywhere in [0, 63], so the full 64-bit shift is required.
+; GCN: v_lshl_b64
+define amdgpu_kernel void @trunc_shl_no_and(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
+bb:
+ %tmp = load i64, i64 addrspace(1)* %arg, align 8
+ %tmp4 = zext i32 %arg2 to i64
+ %tmp5 = shl i64 %tmp, %tmp4
+ %tmp6 = trunc i64 %tmp5 to i32
+ store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_shl_vec_vec:
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
+; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 6, v{{[0-9]+}}
+; GCN-NOT: v_lshl_b64
+; GCN-NOT: v_lshlrev_b64
+define amdgpu_kernel void @trunc_shl_vec_vec(<4 x i64> addrspace(1)* %arg) {
+bb:
+ %v = load <4 x i64>, <4 x i64> addrspace(1)* %arg, align 32
+ %shl = shl <4 x i64> %v, <i64 3, i64 4, i64 5, i64 6>
+ store <4 x i64> %shl, <4 x i64> addrspace(1)* %arg, align 32
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/si-lod-bias.ll b/test/CodeGen/AMDGPU/si-lod-bias.ll
index 3a7359ea4ffa..422498066509 100644
--- a/test/CodeGen/AMDGPU/si-lod-bias.ll
+++ b/test/CodeGen/AMDGPU/si-lod-bias.ll
@@ -6,15 +6,15 @@
; GCN-LABEL: {{^}}main:
; GCN: image_sample_b v{{\[[0-9]:[0-9]\]}}, v{{\[[0-9]:[0-9]\]}}, s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0xf
-define amdgpu_ps void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @main(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <8 x i32> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
%tmp22 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %arg2, i32 0
%tmp23 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp22, !tbaa !0
- %tmp24 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg1, i32 0
- %tmp25 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp24, !tbaa !0
+ %tmp24 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg1, i32 0
+ %tmp25 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp24, !tbaa !0
%i.i = extractelement <2 x i32> %arg5, i32 0
%j.i = extractelement <2 x i32> %arg5, i32 1
%i.f.i = bitcast i32 %i.i to float
@@ -34,9 +34,8 @@ main_body:
%tmp32 = insertelement <4 x i32> %tmp31, i32 %tmp29, i32 1
%tmp33 = insertelement <4 x i32> %tmp32, i32 %tmp30, i32 2
%tmp34 = insertelement <4 x i32> %tmp33, i32 undef, i32 3
- %tmp25.bc = bitcast <16 x i8> %tmp25 to <4 x i32>
%tmp34.bc = bitcast <4 x i32> %tmp34 to <4 x float>
- %tmp35 = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> %tmp34.bc, <8 x i32> %tmp23, <4 x i32> %tmp25.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
+ %tmp35 = call <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float> %tmp34.bc, <8 x i32> %tmp23, <4 x i32> %tmp25, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp36 = extractelement <4 x float> %tmp35, i32 0
%tmp37 = extractelement <4 x float> %tmp35, i32 1
%tmp38 = extractelement <4 x float> %tmp35, i32 2
@@ -49,7 +48,7 @@ declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.sample.b.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/si-sgpr-spill.ll b/test/CodeGen/AMDGPU/si-sgpr-spill.ll
index 8731e74d63a0..3e70f2c77826 100644
--- a/test/CodeGen/AMDGPU/si-sgpr-spill.ll
+++ b/test/CodeGen/AMDGPU/si-sgpr-spill.ll
@@ -24,81 +24,81 @@
; GCN: s_endpgm
; TOVGPR: ScratchSize: 0{{$}}
-define amdgpu_ps void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
+define amdgpu_ps void @main([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) {
main_body:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 96)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 100)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 104)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 112)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 116)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 120)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 128)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 132)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 140)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 144)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 160)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 176)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 180)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 184)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 192)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 196)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 200)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 208)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 212)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 216)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 224)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 240)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 244)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 248)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 256)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 272)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 276)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 280)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 288)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 292)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 296)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 304)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 308)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 312)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 368)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 372)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 376)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 384)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 96)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 100)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 104)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 112)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 116)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 120)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 128)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 132)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 140)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 144)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 160)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 176)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 180)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 184)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 192)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 196)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 200)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 208)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 212)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 216)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 224)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 240)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 244)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 248)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 256)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 272)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 276)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 280)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 288)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 292)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 296)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 304)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 308)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 312)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 368)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 372)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 376)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 384)
%tmp60 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp61 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp60, !tbaa !0
- %tmp62 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp63 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp62, !tbaa !0
- %tmp63.bc = bitcast <16 x i8> %tmp63 to <4 x i32>
+ %tmp62 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp63 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp62, !tbaa !0
+ %tmp63.bc = bitcast <4 x i32> %tmp63 to <4 x i32>
%tmp64 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 1
%tmp65 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp64, !tbaa !0
- %tmp66 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 1
- %tmp67 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp66, !tbaa !0
+ %tmp66 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 1
+ %tmp67 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp66, !tbaa !0
%tmp68 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 2
%tmp69 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp68, !tbaa !0
- %tmp70 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 2
- %tmp71 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp70, !tbaa !0
+ %tmp70 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 2
+ %tmp71 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp70, !tbaa !0
%tmp72 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 3
%tmp73 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp72, !tbaa !0
- %tmp74 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 3
- %tmp75 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp74, !tbaa !0
+ %tmp74 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 3
+ %tmp75 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp74, !tbaa !0
%tmp76 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 4
%tmp77 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp76, !tbaa !0
- %tmp78 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 4
- %tmp79 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp78, !tbaa !0
+ %tmp78 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 4
+ %tmp79 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp78, !tbaa !0
%tmp80 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 5
%tmp81 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp80, !tbaa !0
- %tmp82 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 5
- %tmp83 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp82, !tbaa !0
+ %tmp82 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 5
+ %tmp83 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp82, !tbaa !0
%tmp84 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 6
%tmp85 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp84, !tbaa !0
- %tmp86 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 6
- %tmp87 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp86, !tbaa !0
+ %tmp86 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 6
+ %tmp87 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp86, !tbaa !0
%tmp88 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 7
%tmp89 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp88, !tbaa !0
- %tmp90 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 7
- %tmp91 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp90, !tbaa !0
+ %tmp90 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 7
+ %tmp91 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp90, !tbaa !0
%i.i = extractelement <2 x i32> %arg6, i32 0
%j.i = extractelement <2 x i32> %arg6, i32 1
%i.f.i = bitcast i32 %i.i to float
@@ -410,7 +410,7 @@ IF67: ; preds = %LOOP65
%tmp274 = insertelement <8 x i32> %tmp273, i32 %tmp268, i32 5
%tmp275 = insertelement <8 x i32> %tmp274, i32 undef, i32 6
%tmp276 = insertelement <8 x i32> %tmp275, i32 undef, i32 7
- %tmp67.bc = bitcast <16 x i8> %tmp67 to <4 x i32>
+ %tmp67.bc = bitcast <4 x i32> %tmp67 to <4 x i32>
%tmp276.bc = bitcast <8 x i32> %tmp276 to <8 x float>
%tmp277 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp276.bc, <8 x i32> %tmp65, <4 x i32> %tmp67.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp278 = extractelement <4 x float> %tmp277, i32 0
@@ -432,7 +432,7 @@ IF67: ; preds = %LOOP65
%tmp294 = insertelement <8 x i32> %tmp293, i32 %tmp288, i32 5
%tmp295 = insertelement <8 x i32> %tmp294, i32 undef, i32 6
%tmp296 = insertelement <8 x i32> %tmp295, i32 undef, i32 7
- %tmp83.bc = bitcast <16 x i8> %tmp83 to <4 x i32>
+ %tmp83.bc = bitcast <4 x i32> %tmp83 to <4 x i32>
%tmp296.bc = bitcast <8 x i32> %tmp296 to <8 x float>
%tmp297 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp296.bc, <8 x i32> %tmp81, <4 x i32> %tmp83.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp298 = extractelement <4 x float> %tmp297, i32 0
@@ -452,7 +452,7 @@ IF67: ; preds = %LOOP65
%tmp312 = insertelement <8 x i32> %tmp311, i32 %tmp306, i32 5
%tmp313 = insertelement <8 x i32> %tmp312, i32 undef, i32 6
%tmp314 = insertelement <8 x i32> %tmp313, i32 undef, i32 7
- %tmp79.bc = bitcast <16 x i8> %tmp79 to <4 x i32>
+ %tmp79.bc = bitcast <4 x i32> %tmp79 to <4 x i32>
%tmp314.bc = bitcast <8 x i32> %tmp314 to <8 x float>
%tmp315 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp314.bc, <8 x i32> %tmp77, <4 x i32> %tmp79.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp316 = extractelement <4 x float> %tmp315, i32 0
@@ -515,7 +515,7 @@ IF67: ; preds = %LOOP65
%tmp372 = insertelement <8 x i32> %tmp371, i32 %tmp366, i32 5
%tmp373 = insertelement <8 x i32> %tmp372, i32 undef, i32 6
%tmp374 = insertelement <8 x i32> %tmp373, i32 undef, i32 7
- %tmp71.bc = bitcast <16 x i8> %tmp71 to <4 x i32>
+ %tmp71.bc = bitcast <4 x i32> %tmp71 to <4 x i32>
%tmp374.bc = bitcast <8 x i32> %tmp374 to <8 x float>
%tmp375 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp374.bc, <8 x i32> %tmp69, <4 x i32> %tmp71.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp376 = extractelement <4 x float> %tmp375, i32 0
@@ -571,7 +571,7 @@ IF67: ; preds = %LOOP65
%tmp426 = insertelement <8 x i32> %tmp425, i32 %tmp420, i32 5
%tmp427 = insertelement <8 x i32> %tmp426, i32 undef, i32 6
%tmp428 = insertelement <8 x i32> %tmp427, i32 undef, i32 7
- %tmp87.bc = bitcast <16 x i8> %tmp87 to <4 x i32>
+ %tmp87.bc = bitcast <4 x i32> %tmp87 to <4 x i32>
%tmp428.bc = bitcast <8 x i32> %tmp428 to <8 x float>
%tmp429 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp428.bc, <8 x i32> %tmp85, <4 x i32> %tmp87.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp430 = extractelement <4 x float> %tmp429, i32 0
@@ -624,7 +624,7 @@ IF67: ; preds = %LOOP65
%tmp467 = insertelement <4 x i32> %tmp466, i32 %tmp464, i32 1
%tmp468 = insertelement <4 x i32> %tmp467, i32 %tmp465, i32 2
%tmp469 = insertelement <4 x i32> %tmp468, i32 undef, i32 3
- %tmp91.bc = bitcast <16 x i8> %tmp91 to <4 x i32>
+ %tmp91.bc = bitcast <4 x i32> %tmp91 to <4 x i32>
%tmp469.bc = bitcast <4 x i32> %tmp469 to <4 x float>
%tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float> %tmp469.bc, <8 x i32> %tmp89, <4 x i32> %tmp91.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false) #0
%tmp471 = extractelement <4 x float> %tmp470, i32 0
@@ -727,7 +727,7 @@ IF67: ; preds = %LOOP65
%tmp568 = insertelement <8 x i32> %tmp567, i32 %tmp562, i32 5
%tmp569 = insertelement <8 x i32> %tmp568, i32 undef, i32 6
%tmp570 = insertelement <8 x i32> %tmp569, i32 undef, i32 7
- %tmp75.bc = bitcast <16 x i8> %tmp75 to <4 x i32>
+ %tmp75.bc = bitcast <4 x i32> %tmp75 to <4 x i32>
%tmp570.bc = bitcast <8 x i32> %tmp570 to <8 x float>
%tmp571 = call <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float> %tmp570.bc, <8 x i32> %tmp73, <4 x i32> %tmp75.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp572 = extractelement <4 x float> %tmp571, i32 0
@@ -778,149 +778,149 @@ ENDIF66: ; preds = %LOOP65
; GCN-LABEL: {{^}}main1:
; GCN: s_endpgm
; TOVGPR: ScratchSize: 0{{$}}
-define amdgpu_ps void @main1([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @main1([17 x <4 x i32>] addrspace(2)* byval %arg, [32 x <4 x i32>] addrspace(2)* byval %arg1, [16 x <8 x i32>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
main_body:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0
- %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 0)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 4)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 8)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 12)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 28)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 48)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 52)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 56)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 64)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 68)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 72)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 76)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 128)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 132)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 144)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 148)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 152)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 160)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 164)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 168)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 172)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 176)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 180)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 184)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 192)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 196)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 200)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 208)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 212)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 216)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 220)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 236)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 240)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 244)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 248)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 252)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 256)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 260)
- %tmp60 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 264)
- %tmp61 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 268)
- %tmp62 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 272)
- %tmp63 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 276)
- %tmp64 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 280)
- %tmp65 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 284)
- %tmp66 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 288)
- %tmp67 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 292)
- %tmp68 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 464)
- %tmp69 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 468)
- %tmp70 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 472)
- %tmp71 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 496)
- %tmp72 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 500)
- %tmp73 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 504)
- %tmp74 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 512)
- %tmp75 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 516)
- %tmp76 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 524)
- %tmp77 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 532)
- %tmp78 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 536)
- %tmp79 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 540)
- %tmp80 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 544)
- %tmp81 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 548)
- %tmp82 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 552)
- %tmp83 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 556)
- %tmp84 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 560)
- %tmp85 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 564)
- %tmp86 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 568)
- %tmp87 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 572)
- %tmp88 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 576)
- %tmp89 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 580)
- %tmp90 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 584)
- %tmp91 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 588)
- %tmp92 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 592)
- %tmp93 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 596)
- %tmp94 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 600)
- %tmp95 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 604)
- %tmp96 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 608)
- %tmp97 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 612)
- %tmp98 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 616)
- %tmp99 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 624)
- %tmp100 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 628)
- %tmp101 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 632)
- %tmp102 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 636)
- %tmp103 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 640)
- %tmp104 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 644)
- %tmp105 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 648)
- %tmp106 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 652)
- %tmp107 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 656)
- %tmp108 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 660)
- %tmp109 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 664)
- %tmp110 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 668)
- %tmp111 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 672)
- %tmp112 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 676)
- %tmp113 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 680)
- %tmp114 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 684)
- %tmp115 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 688)
- %tmp116 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 692)
- %tmp117 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 696)
- %tmp118 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 700)
- %tmp119 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 704)
- %tmp120 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 708)
- %tmp121 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 712)
- %tmp122 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 716)
- %tmp123 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 864)
- %tmp124 = call float @llvm.SI.load.const(<16 x i8> %tmp21, i32 868)
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i64 0, i32 0
+ %tmp21 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, !tbaa !0
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 0)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 4)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 8)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 12)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 28)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 48)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 52)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 56)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 64)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 68)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 72)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 76)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 128)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 132)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 144)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 148)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 152)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 160)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 164)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 168)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 172)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 176)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 180)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 184)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 192)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 196)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 200)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 208)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 212)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 216)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 220)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 236)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 240)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 244)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 248)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 252)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 256)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 260)
+ %tmp60 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 264)
+ %tmp61 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 268)
+ %tmp62 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 272)
+ %tmp63 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 276)
+ %tmp64 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 280)
+ %tmp65 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 284)
+ %tmp66 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 288)
+ %tmp67 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 292)
+ %tmp68 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 464)
+ %tmp69 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 468)
+ %tmp70 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 472)
+ %tmp71 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 496)
+ %tmp72 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 500)
+ %tmp73 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 504)
+ %tmp74 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 512)
+ %tmp75 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 516)
+ %tmp76 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 524)
+ %tmp77 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 532)
+ %tmp78 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 536)
+ %tmp79 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 540)
+ %tmp80 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 544)
+ %tmp81 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 548)
+ %tmp82 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 552)
+ %tmp83 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 556)
+ %tmp84 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 560)
+ %tmp85 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 564)
+ %tmp86 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 568)
+ %tmp87 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 572)
+ %tmp88 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 576)
+ %tmp89 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 580)
+ %tmp90 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 584)
+ %tmp91 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 588)
+ %tmp92 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 592)
+ %tmp93 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 596)
+ %tmp94 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 600)
+ %tmp95 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 604)
+ %tmp96 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 608)
+ %tmp97 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 612)
+ %tmp98 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 616)
+ %tmp99 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 624)
+ %tmp100 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 628)
+ %tmp101 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 632)
+ %tmp102 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 636)
+ %tmp103 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 640)
+ %tmp104 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 644)
+ %tmp105 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 648)
+ %tmp106 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 652)
+ %tmp107 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 656)
+ %tmp108 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 660)
+ %tmp109 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 664)
+ %tmp110 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 668)
+ %tmp111 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 672)
+ %tmp112 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 676)
+ %tmp113 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 680)
+ %tmp114 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 684)
+ %tmp115 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 688)
+ %tmp116 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 692)
+ %tmp117 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 696)
+ %tmp118 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 700)
+ %tmp119 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 704)
+ %tmp120 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 708)
+ %tmp121 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 712)
+ %tmp122 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 716)
+ %tmp123 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 864)
+ %tmp124 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp21, i32 868)
%tmp125 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 0
%tmp126 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp125, !tbaa !0
- %tmp127 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 0
- %tmp128 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp127, !tbaa !0
+ %tmp127 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 0
+ %tmp128 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp127, !tbaa !0
%tmp129 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 1
%tmp130 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp129, !tbaa !0
- %tmp131 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 1
- %tmp132 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp131, !tbaa !0
+ %tmp131 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 1
+ %tmp132 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp131, !tbaa !0
%tmp133 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 2
%tmp134 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp133, !tbaa !0
- %tmp135 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 2
- %tmp136 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp135, !tbaa !0
+ %tmp135 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 2
+ %tmp136 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp135, !tbaa !0
%tmp137 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 3
%tmp138 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp137, !tbaa !0
- %tmp139 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 3
- %tmp140 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp139, !tbaa !0
+ %tmp139 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 3
+ %tmp140 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp139, !tbaa !0
%tmp141 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 4
%tmp142 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp141, !tbaa !0
- %tmp143 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 4
- %tmp144 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp143, !tbaa !0
+ %tmp143 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 4
+ %tmp144 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp143, !tbaa !0
%tmp145 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 5
%tmp146 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp145, !tbaa !0
- %tmp147 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 5
- %tmp148 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp147, !tbaa !0
+ %tmp147 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 5
+ %tmp148 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp147, !tbaa !0
%tmp149 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 6
%tmp150 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp149, !tbaa !0
- %tmp151 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 6
- %tmp152 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp151, !tbaa !0
+ %tmp151 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 6
+ %tmp152 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp151, !tbaa !0
%tmp153 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 7
%tmp154 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp153, !tbaa !0
- %tmp155 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 7
- %tmp156 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp155, !tbaa !0
+ %tmp155 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 7
+ %tmp156 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp155, !tbaa !0
%tmp157 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(2)* %arg2, i64 0, i32 8
%tmp158 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp157, !tbaa !0
- %tmp159 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %arg1, i64 0, i32 8
- %tmp160 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp159, !tbaa !0
+ %tmp159 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(2)* %arg1, i64 0, i32 8
+ %tmp160 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp159, !tbaa !0
%tmp161 = fcmp ugt float %arg17, 0.000000e+00
%tmp162 = select i1 %tmp161, float 1.000000e+00, float 0.000000e+00
%i.i = extractelement <2 x i32> %arg6, i32 0
@@ -1144,7 +1144,7 @@ main_body:
%tmp222 = bitcast float %p2.i126 to i32
%tmp223 = insertelement <2 x i32> undef, i32 %tmp221, i32 0
%tmp224 = insertelement <2 x i32> %tmp223, i32 %tmp222, i32 1
- %tmp132.bc = bitcast <16 x i8> %tmp132 to <4 x i32>
+ %tmp132.bc = bitcast <4 x i32> %tmp132 to <4 x i32>
%tmp224.bc = bitcast <2 x i32> %tmp224 to <2 x float>
%tmp225 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp224.bc, <8 x i32> %tmp130, <4 x i32> %tmp132.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp226 = extractelement <4 x float> %tmp225, i32 0
@@ -1218,7 +1218,7 @@ LOOP: ; preds = %LOOP, %main_body
%tmp279 = insertelement <4 x i32> %tmp278, i32 %tmp277, i32 1
%tmp280 = insertelement <4 x i32> %tmp279, i32 0, i32 2
%tmp281 = insertelement <4 x i32> %tmp280, i32 undef, i32 3
- %tmp148.bc = bitcast <16 x i8> %tmp148 to <4 x i32>
+ %tmp148.bc = bitcast <4 x i32> %tmp148 to <4 x i32>
%tmp281.bc = bitcast <4 x i32> %tmp281 to <4 x float>
%tmp282 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp281.bc, <8 x i32> %tmp146, <4 x i32> %tmp148.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp283 = extractelement <4 x float> %tmp282, i32 3
@@ -1283,7 +1283,7 @@ IF189: ; preds = %LOOP
%tmp339 = bitcast float %tmp335 to i32
%tmp340 = insertelement <2 x i32> undef, i32 %tmp338, i32 0
%tmp341 = insertelement <2 x i32> %tmp340, i32 %tmp339, i32 1
- %tmp136.bc = bitcast <16 x i8> %tmp136 to <4 x i32>
+ %tmp136.bc = bitcast <4 x i32> %tmp136 to <4 x i32>
%a.bc.i = bitcast <2 x i32> %tmp341 to <2 x float>
%tmp0 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i, <8 x i32> %tmp134, <4 x i32> %tmp136.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp343 = extractelement <4 x float> %tmp0, i32 0
@@ -1317,7 +1317,7 @@ IF189: ; preds = %LOOP
%tmp359 = bitcast float %tmp337 to i32
%tmp360 = insertelement <2 x i32> undef, i32 %tmp358, i32 0
%tmp361 = insertelement <2 x i32> %tmp360, i32 %tmp359, i32 1
- %tmp152.bc = bitcast <16 x i8> %tmp152 to <4 x i32>
+ %tmp152.bc = bitcast <4 x i32> %tmp152 to <4 x i32>
%a.bc.i3 = bitcast <2 x i32> %tmp361 to <2 x float>
%tmp1 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i3, <8 x i32> %tmp150, <4 x i32> %tmp152.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp363 = extractelement <4 x float> %tmp1, i32 2
@@ -1329,7 +1329,7 @@ IF189: ; preds = %LOOP
%tmp369 = bitcast float %tmp311 to i32
%tmp370 = insertelement <2 x i32> undef, i32 %tmp368, i32 0
%tmp371 = insertelement <2 x i32> %tmp370, i32 %tmp369, i32 1
- %tmp140.bc = bitcast <16 x i8> %tmp140 to <4 x i32>
+ %tmp140.bc = bitcast <4 x i32> %tmp140 to <4 x i32>
%a.bc.i2 = bitcast <2 x i32> %tmp371 to <2 x float>
%tmp2 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i2, <8 x i32> %tmp138, <4 x i32> %tmp140.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp373 = extractelement <4 x float> %tmp2, i32 0
@@ -1347,7 +1347,7 @@ IF189: ; preds = %LOOP
%tmp383 = bitcast float %tmp321 to i32
%tmp384 = insertelement <2 x i32> undef, i32 %tmp382, i32 0
%tmp385 = insertelement <2 x i32> %tmp384, i32 %tmp383, i32 1
- %tmp144.bc = bitcast <16 x i8> %tmp144 to <4 x i32>
+ %tmp144.bc = bitcast <4 x i32> %tmp144 to <4 x i32>
%a.bc.i1 = bitcast <2 x i32> %tmp385 to <2 x float>
%tmp3 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %a.bc.i1, <8 x i32> %tmp142, <4 x i32> %tmp144.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp387 = extractelement <4 x float> %tmp3, i32 0
@@ -1446,7 +1446,7 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp467 = bitcast float %tmp220 to i32
%tmp468 = insertelement <2 x i32> undef, i32 %tmp466, i32 0
%tmp469 = insertelement <2 x i32> %tmp468, i32 %tmp467, i32 1
- %tmp160.bc = bitcast <16 x i8> %tmp160 to <4 x i32>
+ %tmp160.bc = bitcast <4 x i32> %tmp160 to <4 x i32>
%tmp469.bc = bitcast <2 x i32> %tmp469 to <2 x float>
%tmp470 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp469.bc, <8 x i32> %tmp158, <4 x i32> %tmp160.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp471 = extractelement <4 x float> %tmp470, i32 0
@@ -1465,7 +1465,7 @@ ENDIF197: ; preds = %IF198, %IF189
%tmp484 = bitcast float %p2.i138 to i32
%tmp485 = insertelement <2 x i32> undef, i32 %tmp483, i32 0
%tmp486 = insertelement <2 x i32> %tmp485, i32 %tmp484, i32 1
- %tmp156.bc = bitcast <16 x i8> %tmp156 to <4 x i32>
+ %tmp156.bc = bitcast <4 x i32> %tmp156 to <4 x i32>
%tmp486.bc = bitcast <2 x i32> %tmp486 to <2 x float>
%tmp487 = call <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float> %tmp486.bc, <8 x i32> %tmp154, <4 x i32> %tmp156.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp488 = extractelement <4 x float> %tmp487, i32 0
@@ -1674,7 +1674,7 @@ ENDIF209: ; preds = %ELSE214, %ELSE211,
%tmp657 = insertelement <4 x i32> %tmp656, i32 %tmp654, i32 1
%tmp658 = insertelement <4 x i32> %tmp657, i32 %tmp655, i32 2
%tmp659 = insertelement <4 x i32> %tmp658, i32 undef, i32 3
- %tmp128.bc = bitcast <16 x i8> %tmp128 to <4 x i32>
+ %tmp128.bc = bitcast <4 x i32> %tmp128 to <4 x i32>
%tmp659.bc = bitcast <4 x i32> %tmp659 to <4 x float>
%tmp660 = call <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float> %tmp659.bc, <8 x i32> %tmp126, <4 x i32> %tmp128.bc, i32 15, i1 false, i1 false, i1 false, i1 false, i1 false)
%tmp661 = extractelement <4 x float> %tmp660, i32 0
@@ -1869,7 +1869,7 @@ declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare <4 x float> @llvm.amdgcn.image.sample.d.v4f32.v8f32.v8i32(<8 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
declare <4 x float> @llvm.amdgcn.image.sample.l.v4f32.v4f32.v8i32(<4 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/si-spill-cf.ll b/test/CodeGen/AMDGPU/si-spill-cf.ll
index 926702645d9e..2a8ced59ddef 100644
--- a/test/CodeGen/AMDGPU/si-spill-cf.ll
+++ b/test/CodeGen/AMDGPU/si-spill-cf.ll
@@ -9,73 +9,73 @@
define amdgpu_ps void @main() #0 {
main_body:
- %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 16)
- %tmp1 = call float @llvm.SI.load.const(<16 x i8> undef, i32 32)
- %tmp2 = call float @llvm.SI.load.const(<16 x i8> undef, i32 80)
- %tmp3 = call float @llvm.SI.load.const(<16 x i8> undef, i32 84)
- %tmp4 = call float @llvm.SI.load.const(<16 x i8> undef, i32 88)
- %tmp5 = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
- %tmp6 = call float @llvm.SI.load.const(<16 x i8> undef, i32 100)
- %tmp7 = call float @llvm.SI.load.const(<16 x i8> undef, i32 104)
- %tmp8 = call float @llvm.SI.load.const(<16 x i8> undef, i32 112)
- %tmp9 = call float @llvm.SI.load.const(<16 x i8> undef, i32 116)
- %tmp10 = call float @llvm.SI.load.const(<16 x i8> undef, i32 120)
- %tmp11 = call float @llvm.SI.load.const(<16 x i8> undef, i32 128)
- %tmp12 = call float @llvm.SI.load.const(<16 x i8> undef, i32 132)
- %tmp13 = call float @llvm.SI.load.const(<16 x i8> undef, i32 136)
- %tmp14 = call float @llvm.SI.load.const(<16 x i8> undef, i32 144)
- %tmp15 = call float @llvm.SI.load.const(<16 x i8> undef, i32 148)
- %tmp16 = call float @llvm.SI.load.const(<16 x i8> undef, i32 152)
- %tmp17 = call float @llvm.SI.load.const(<16 x i8> undef, i32 160)
- %tmp18 = call float @llvm.SI.load.const(<16 x i8> undef, i32 164)
- %tmp19 = call float @llvm.SI.load.const(<16 x i8> undef, i32 168)
- %tmp20 = call float @llvm.SI.load.const(<16 x i8> undef, i32 176)
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> undef, i32 180)
- %tmp22 = call float @llvm.SI.load.const(<16 x i8> undef, i32 184)
- %tmp23 = call float @llvm.SI.load.const(<16 x i8> undef, i32 192)
- %tmp24 = call float @llvm.SI.load.const(<16 x i8> undef, i32 196)
- %tmp25 = call float @llvm.SI.load.const(<16 x i8> undef, i32 200)
- %tmp26 = call float @llvm.SI.load.const(<16 x i8> undef, i32 208)
- %tmp27 = call float @llvm.SI.load.const(<16 x i8> undef, i32 212)
- %tmp28 = call float @llvm.SI.load.const(<16 x i8> undef, i32 216)
- %tmp29 = call float @llvm.SI.load.const(<16 x i8> undef, i32 224)
- %tmp30 = call float @llvm.SI.load.const(<16 x i8> undef, i32 228)
- %tmp31 = call float @llvm.SI.load.const(<16 x i8> undef, i32 232)
- %tmp32 = call float @llvm.SI.load.const(<16 x i8> undef, i32 240)
- %tmp33 = call float @llvm.SI.load.const(<16 x i8> undef, i32 244)
- %tmp34 = call float @llvm.SI.load.const(<16 x i8> undef, i32 248)
- %tmp35 = call float @llvm.SI.load.const(<16 x i8> undef, i32 256)
- %tmp36 = call float @llvm.SI.load.const(<16 x i8> undef, i32 260)
- %tmp37 = call float @llvm.SI.load.const(<16 x i8> undef, i32 264)
- %tmp38 = call float @llvm.SI.load.const(<16 x i8> undef, i32 272)
- %tmp39 = call float @llvm.SI.load.const(<16 x i8> undef, i32 276)
- %tmp40 = call float @llvm.SI.load.const(<16 x i8> undef, i32 280)
- %tmp41 = call float @llvm.SI.load.const(<16 x i8> undef, i32 288)
- %tmp42 = call float @llvm.SI.load.const(<16 x i8> undef, i32 292)
- %tmp43 = call float @llvm.SI.load.const(<16 x i8> undef, i32 296)
- %tmp44 = call float @llvm.SI.load.const(<16 x i8> undef, i32 304)
- %tmp45 = call float @llvm.SI.load.const(<16 x i8> undef, i32 308)
- %tmp46 = call float @llvm.SI.load.const(<16 x i8> undef, i32 312)
- %tmp47 = call float @llvm.SI.load.const(<16 x i8> undef, i32 320)
- %tmp48 = call float @llvm.SI.load.const(<16 x i8> undef, i32 324)
- %tmp49 = call float @llvm.SI.load.const(<16 x i8> undef, i32 328)
- %tmp50 = call float @llvm.SI.load.const(<16 x i8> undef, i32 336)
- %tmp51 = call float @llvm.SI.load.const(<16 x i8> undef, i32 340)
- %tmp52 = call float @llvm.SI.load.const(<16 x i8> undef, i32 344)
- %tmp53 = call float @llvm.SI.load.const(<16 x i8> undef, i32 352)
- %tmp54 = call float @llvm.SI.load.const(<16 x i8> undef, i32 356)
- %tmp55 = call float @llvm.SI.load.const(<16 x i8> undef, i32 360)
- %tmp56 = call float @llvm.SI.load.const(<16 x i8> undef, i32 368)
- %tmp57 = call float @llvm.SI.load.const(<16 x i8> undef, i32 372)
- %tmp58 = call float @llvm.SI.load.const(<16 x i8> undef, i32 376)
- %tmp59 = call float @llvm.SI.load.const(<16 x i8> undef, i32 384)
- %tmp60 = call float @llvm.SI.load.const(<16 x i8> undef, i32 388)
- %tmp61 = call float @llvm.SI.load.const(<16 x i8> undef, i32 392)
- %tmp62 = call float @llvm.SI.load.const(<16 x i8> undef, i32 400)
- %tmp63 = call float @llvm.SI.load.const(<16 x i8> undef, i32 404)
- %tmp64 = call float @llvm.SI.load.const(<16 x i8> undef, i32 408)
- %tmp65 = call float @llvm.SI.load.const(<16 x i8> undef, i32 416)
- %tmp66 = call float @llvm.SI.load.const(<16 x i8> undef, i32 420)
+ %tmp = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 16)
+ %tmp1 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 32)
+ %tmp2 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 80)
+ %tmp3 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 84)
+ %tmp4 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 88)
+ %tmp5 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 96)
+ %tmp6 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 100)
+ %tmp7 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 104)
+ %tmp8 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 112)
+ %tmp9 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 116)
+ %tmp10 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 120)
+ %tmp11 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 128)
+ %tmp12 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 132)
+ %tmp13 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 136)
+ %tmp14 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 144)
+ %tmp15 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 148)
+ %tmp16 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 152)
+ %tmp17 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 160)
+ %tmp18 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 164)
+ %tmp19 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 168)
+ %tmp20 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 176)
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 180)
+ %tmp22 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 184)
+ %tmp23 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 192)
+ %tmp24 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 196)
+ %tmp25 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 200)
+ %tmp26 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 208)
+ %tmp27 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 212)
+ %tmp28 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 216)
+ %tmp29 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 224)
+ %tmp30 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 228)
+ %tmp31 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 232)
+ %tmp32 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 240)
+ %tmp33 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 244)
+ %tmp34 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 248)
+ %tmp35 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 256)
+ %tmp36 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 260)
+ %tmp37 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 264)
+ %tmp38 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 272)
+ %tmp39 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 276)
+ %tmp40 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 280)
+ %tmp41 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 288)
+ %tmp42 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 292)
+ %tmp43 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 296)
+ %tmp44 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 304)
+ %tmp45 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 308)
+ %tmp46 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 312)
+ %tmp47 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 320)
+ %tmp48 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 324)
+ %tmp49 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 328)
+ %tmp50 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 336)
+ %tmp51 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 340)
+ %tmp52 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 344)
+ %tmp53 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 352)
+ %tmp54 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 356)
+ %tmp55 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 360)
+ %tmp56 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 368)
+ %tmp57 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 372)
+ %tmp58 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 376)
+ %tmp59 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 384)
+ %tmp60 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 388)
+ %tmp61 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 392)
+ %tmp62 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 400)
+ %tmp63 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 404)
+ %tmp64 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 408)
+ %tmp65 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 416)
+ %tmp66 = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 420)
br label %LOOP
LOOP: ; preds = %ENDIF2795, %main_body
@@ -497,7 +497,7 @@ declare float @llvm.minnum.f32(float, float) #1
declare float @llvm.maxnum.f32(float, float) #1
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/smrd.ll b/test/CodeGen/AMDGPU/smrd.ll
index 50f72c670598..3f1e1cacb879 100644
--- a/test/CodeGen/AMDGPU/smrd.ll
+++ b/test/CodeGen/AMDGPU/smrd.ll
@@ -84,34 +84,34 @@ entry:
ret void
}
-; SMRD load using the load.const intrinsic with an immediate offset
+; SMRD load using the load.const.v4i32 intrinsic with an immediate offset
; GCN-LABEL: {{^}}smrd_load_const0:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
-define amdgpu_ps void @smrd_load_const0(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const0(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 16)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 16)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
-; SMRD load using the load.const intrinsic with the largest possible immediate
+; SMRD load using the load.const.v4i32 intrinsic with the largest possible immediate
; offset.
; GCN-LABEL: {{^}}smrd_load_const1:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
-define amdgpu_ps void @smrd_load_const1(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const1(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1020)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1020)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
-; SMRD load using the load.const intrinsic with an offset greater than the
+; SMRD load using the load.const.v4i32 intrinsic with an offset greater than the
; largets possible immediate.
; immediate offset.
; GCN-LABEL: {{^}}smrd_load_const2:
@@ -119,11 +119,11 @@ main_body:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
-define amdgpu_ps void @smrd_load_const2(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const2(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1024)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1024)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -134,11 +134,11 @@ main_body:
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
-define amdgpu_ps void @smrd_load_const3(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const3(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1048572)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048572)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
@@ -149,17 +149,17 @@ main_body:
; SIVI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
-define amdgpu_ps void @smrd_load_const4(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @smrd_load_const4(<4 x i32> addrspace(2)* inreg %arg, <4 x i32> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
main_body:
- %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg, i32 0
- %tmp20 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp
- %tmp21 = call float @llvm.SI.load.const(<16 x i8> %tmp20, i32 1048576)
+ %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %arg, i32 0
+ %tmp20 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp
+ %tmp21 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp20, i32 1048576)
call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
ret void
}
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/spill-to-smem-m0.ll b/test/CodeGen/AMDGPU/spill-to-smem-m0.ll
new file mode 100644
index 000000000000..c6691e7bb2f8
--- /dev/null
+++ b/test/CodeGen/AMDGPU/spill-to-smem-m0.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -march=amdgcn -mcpu=fiji -amdgpu-spill-sgpr-to-smem=1 -verify-machineinstrs -stop-before=prologepilog < %s
+
+; Spill to SMEM clobbers M0. Check that the implicit-def dead operand is present
+; in the pseudo instructions.
+
+; CHECK-LABEL: {{^}}spill_sgpr:
+; CHECK: SI_SPILL_S32_SAVE {{.*}}, implicit-def dead %m0
+; CHECK: SI_SPILL_S32_RESTORE {{.*}}, implicit-def dead %m0
+define amdgpu_kernel void @spill_sgpr(i32 addrspace(1)* %out, i32 %in) #0 {
+ %sgpr = call i32 asm sideeffect "; def $0", "=s" () #0
+ %cmp = icmp eq i32 %in, 0
+ br i1 %cmp, label %bb0, label %ret
+
+bb0:
+ call void asm sideeffect "; use $0", "s"(i32 %sgpr) #0
+ br label %ret
+
+ret:
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AMDGPU/split-smrd.ll b/test/CodeGen/AMDGPU/split-smrd.ll
index cdb1b1e3b503..5fc69067760a 100644
--- a/test/CodeGen/AMDGPU/split-smrd.ll
+++ b/test/CodeGen/AMDGPU/split-smrd.ll
@@ -8,7 +8,7 @@
; GCN: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
define amdgpu_ps void @split_smrd_add_worklist([34 x <8 x i32>] addrspace(2)* byval %arg) #0 {
bb:
- %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
+ %tmp = call float @llvm.SI.load.const.v4i32(<4 x i32> undef, i32 96)
%tmp1 = bitcast float %tmp to i32
br i1 undef, label %bb2, label %bb3
@@ -31,7 +31,7 @@ bb3: ; preds = %bb
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #1
declare void @llvm.amdgcn.exp.compr.v2f16(i32, i32, <2 x half>, <2 x half>, i1, i1) #0
declare <4 x float> @llvm.amdgcn.image.sample.v4f32.v2f32.v8i32(<2 x float>, <8 x i32>, <4 x i32>, i32, i1, i1, i1, i1, i1) #2
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
index c9c8583d5e87..ca2366a361fb 100644
--- a/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
+++ b/test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll
@@ -27,17 +27,17 @@
; GCN: NumVgprs: 256
; GCN: ScratchSize: 1536
-define amdgpu_vs void @main([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <16 x i8>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
+define amdgpu_vs void @main([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <4 x i32>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 {
bb:
- %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i64 0, i64 0
- %tmp11 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, align 16, !tbaa !0
- %tmp12 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 0)
- %tmp13 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 16)
- %tmp14 = call float @llvm.SI.load.const(<16 x i8> %tmp11, i32 32)
- %tmp15 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %arg4, i64 0, i64 0
- %tmp16 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp15, align 16, !tbaa !0
+ %tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg1, i64 0, i64 0
+ %tmp11 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp, align 16, !tbaa !0
+ %tmp12 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp11, i32 0)
+ %tmp13 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp11, i32 16)
+ %tmp14 = call float @llvm.SI.load.const.v4i32(<4 x i32> %tmp11, i32 32)
+ %tmp15 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %arg4, i64 0, i64 0
+ %tmp16 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp15, align 16, !tbaa !0
%tmp17 = add i32 %arg5, %arg7
- %tmp16.cast = bitcast <16 x i8> %tmp16 to <4 x i32>
+ %tmp16.cast = bitcast <4 x i32> %tmp16 to <4 x i32>
%tmp18 = call <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32> %tmp16.cast, i32 %tmp17, i32 0, i1 false, i1 false)
%tmp19 = extractelement <4 x float> %tmp18, i32 0
%tmp20 = extractelement <4 x float> %tmp18, i32 1
@@ -488,7 +488,7 @@ bb157: ; preds = %bb24
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
-declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+declare float @llvm.SI.load.const.v4i32(<4 x i32>, i32) #1
declare <4 x float> @llvm.amdgcn.buffer.load.format.v4f32(<4 x i32>, i32, i32, i1, i1) #2
attributes #0 = { nounwind }
diff --git a/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
index ff3b7e16188e..fefe16747f10 100644
--- a/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
+++ b/test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll
@@ -24,7 +24,7 @@ entry:
; CHECK-LABEL: caller:
define void @caller() {
-; CHECK: ldm r0, {r1, r2, r3}
+; CHECK: ldm r{{[0-9]+}}, {r1, r2, r3}
call void @t(i32 0, %struct.s* @v);
ret void
}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
index 16642d85d9cf..6a1da0dfe85f 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
@@ -42,6 +42,9 @@
define void @test_constant_imm() { ret void }
define void @test_constant_cimm() { ret void }
+ define void @test_select_s32() { ret void }
+ define void @test_select_ptr() { ret void }
+
define void @test_soft_fp_double() #0 { ret void }
attributes #0 = { "target-features"="+vfp2,-neonfp" }
@@ -1100,6 +1103,76 @@ body: |
BX_RET 14, _, implicit %r0
...
---
+name: test_select_s32
+# CHECK-LABEL: name: test_select_s32
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(s32) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(s32) = COPY %r1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
+
+ %2(s1) = COPY %r2
+ ; CHECK: [[VREGC:%[0-9]+]] = COPY %r2
+
+ %3(s32) = G_SELECT %2(s1), %0, %1
+ ; CHECK: CMPri [[VREGC]], 0, 14, _, implicit-def %cpsr
+ ; CHECK: [[RES:%[0-9]+]] = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
+
+ %r0 = COPY %3(s32)
+ ; CHECK: %r0 = COPY [[RES]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
+name: test_select_ptr
+# CHECK-LABEL: name: test_select_ptr
+legalized: true
+regBankSelected: true
+selected: false
+# CHECK: selected: true
+registers:
+ - { id: 0, class: gprb }
+ - { id: 1, class: gprb }
+ - { id: 2, class: gprb }
+ - { id: 3, class: gprb }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(p0) = COPY %r0
+ ; CHECK: [[VREGX:%[0-9]+]] = COPY %r0
+
+ %1(p0) = COPY %r1
+ ; CHECK: [[VREGY:%[0-9]+]] = COPY %r1
+
+ %2(s1) = COPY %r2
+ ; CHECK: [[VREGC:%[0-9]+]] = COPY %r2
+
+ %3(p0) = G_SELECT %2(s1), %0, %1
+ ; CHECK: CMPri [[VREGC]], 0, 14, _, implicit-def %cpsr
+ ; CHECK: [[RES:%[0-9]+]] = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
+
+ %r0 = COPY %3(p0)
+ ; CHECK: %r0 = COPY [[RES]]
+
+ BX_RET 14, _, implicit %r0
+ ; CHECK: BX_RET 14, _, implicit %r0
+...
+---
name: test_soft_fp_double
# CHECK-LABEL: name: test_soft_fp_double
legalized: true
diff --git a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
index 0ff8d52e94c6..f50916e4b474 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll
@@ -910,7 +910,7 @@ define arm_aapcscc {i32, i32} @test_structs({i32, i32} %x) {
define i32 @test_shufflevector_s32_v2s32(i32 %arg) {
; CHECK-LABEL: name: test_shufflevector_s32_v2s32
; CHECK: [[ARG:%[0-9]+]](s32) = COPY %r0
-; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](s32) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[MASK:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32)
; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_SHUFFLE_VECTOR [[ARG]](s32), [[UNDEF]], [[MASK]](<2 x s32>)
@@ -925,7 +925,7 @@ define i32 @test_shufflevector_v2s32_v3s32(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v3s32
; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %r0
; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[MASK:%[0-9]+]](<3 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C0]](s32), [[C1]](s32)
@@ -945,7 +945,7 @@ define i32 @test_shufflevector_v2s32_v4s32(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: name: test_shufflevector_v2s32_v4s32
; CHECK: [[ARG1:%[0-9]+]](s32) = COPY %r0
; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<2 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[MASK:%[0-9]+]](<4 x s32>) = G_MERGE_VALUES [[C0]](s32), [[C0]](s32), [[C0]](s32), [[C0]](s32)
@@ -966,7 +966,7 @@ define i32 @test_shufflevector_v4s32_v2s32(i32 %arg1, i32 %arg2, i32 %arg3, i32
; CHECK: [[ARG2:%[0-9]+]](s32) = COPY %r1
; CHECK: [[ARG3:%[0-9]+]](s32) = COPY %r2
; CHECK: [[ARG4:%[0-9]+]](s32) = COPY %r3
-; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = IMPLICIT_DEF
+; CHECK-DAG: [[UNDEF:%[0-9]+]](<4 x s32>) = G_IMPLICIT_DEF
; CHECK-DAG: [[C0:%[0-9]+]](s32) = G_CONSTANT i32 0
; CHECK-DAG: [[C1:%[0-9]+]](s32) = G_CONSTANT i32 1
; CHECK-DAG: [[C2:%[0-9]+]](s32) = G_CONSTANT i32 2
@@ -1009,7 +1009,7 @@ define i32 @test_constantstruct_v2s32_s32_s32() {
; CHECK: [[VEC:%[0-9]+]](<2 x s32>) = G_MERGE_VALUES [[C1]](s32), [[C2]](s32)
; CHECK: [[C3:%[0-9]+]](s32) = G_CONSTANT i32 3
; CHECK: [[C4:%[0-9]+]](s32) = G_CONSTANT i32 4
-; CHECK: [[C5:%[0-9]+]](s128) = IMPLICIT_DEF
+; CHECK: [[C5:%[0-9]+]](s128) = G_IMPLICIT_DEF
; CHECK: [[C6:%[0-9]+]](s128) = G_INSERT [[C5]], [[VEC]](<2 x s32>), 0
; CHECK: [[C7:%[0-9]+]](s128) = G_INSERT [[C6]], [[C3]](s32), 64
; CHECK: [[C8:%[0-9]+]](s128) = G_INSERT [[C7]], [[C4]](s32), 96
diff --git a/test/CodeGen/ARM/GlobalISel/arm-isel.ll b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
index 76fb39ecea01..4c498ff6ca9b 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-isel.ll
+++ b/test/CodeGen/ARM/GlobalISel/arm-isel.ll
@@ -400,3 +400,23 @@ entry:
%r = zext i1 %v to i32
ret i32 %r
}
+
+define arm_aapcscc i32 @test_select_i32(i32 %a, i32 %b, i1 %cond) {
+; CHECK-LABEL: test_select_i32
+; CHECK: cmp r2, #0
+; CHECK: moveq r0, r1
+; CHECK: bx lr
+entry:
+ %r = select i1 %cond, i32 %a, i32 %b
+ ret i32 %r
+}
+
+define arm_aapcscc i32* @test_select_ptr(i32* %a, i32* %b, i1 %cond) {
+; CHECK-LABEL: test_select_ptr
+; CHECK: cmp r2, #0
+; CHECK: moveq r0, r1
+; CHECK: bx lr
+entry:
+ %r = select i1 %cond, i32* %a, i32* %b
+ ret i32* %r
+}
diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
index 2def31eb1592..bf759728c365 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-legalizer.mir
@@ -39,6 +39,9 @@
define void @test_icmp_s16() { ret void }
define void @test_icmp_s32() { ret void }
+ define void @test_select_s32() { ret void }
+ define void @test_select_ptr() { ret void }
+
define void @test_fadd_s32() #0 { ret void }
define void @test_fadd_s64() #0 { ret void }
@@ -775,6 +778,58 @@ body: |
BX_RET 14, _, implicit %r0
...
---
+name: test_select_s32
+# CHECK-LABEL: name: test_select_s32
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s1) = COPY %r2
+ %3(s32) = G_SELECT %2(s1), %0, %1
+ ; G_SELECT with s32 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(s32) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
+ %r0 = COPY %3(s32)
+ BX_RET 14, _, implicit %r0
+...
+---
+name: test_select_ptr
+# CHECK-LABEL: name: test_select_ptr
+legalized: false
+# CHECK: legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(p0) = COPY %r0
+ %1(p0) = COPY %r1
+ %2(s1) = COPY %r2
+ %3(p0) = G_SELECT %2(s1), %0, %1
+ ; G_SELECT with p0 is legal, so we should find it unchanged in the output
+ ; CHECK: {{%[0-9]+}}(p0) = G_SELECT {{%[0-9]+}}(s1), {{%[0-9]+}}, {{%[0-9]+}}
+ %r0 = COPY %3(p0)
+ BX_RET 14, _, implicit %r0
+...
+---
name: test_fadd_s32
# CHECK-LABEL: name: test_fadd_s32
legalized: false
diff --git a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
index d97dd60bac22..d3b93e488ef4 100644
--- a/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
+++ b/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
@@ -36,6 +36,8 @@
define void @test_icmp_eq_s32() { ret void }
+ define void @test_select_s32() { ret void }
+
define void @test_fadd_s32() #0 { ret void }
define void @test_fadd_s64() #0 { ret void }
@@ -741,6 +743,35 @@ body: |
...
---
+name: test_select_s32
+# CHECK-LABEL: name: test_select_s32
+legalized: true
+regBankSelected: false
+selected: false
+# CHECK: registers:
+# CHECK: - { id: 0, class: gprb, preferred-register: '' }
+# CHECK: - { id: 1, class: gprb, preferred-register: '' }
+# CHECK: - { id: 2, class: gprb, preferred-register: '' }
+# CHECK: - { id: 3, class: gprb, preferred-register: '' }
+
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+ - { id: 3, class: _ }
+body: |
+ bb.0:
+ liveins: %r0, %r1, %r2
+
+ %0(s32) = COPY %r0
+ %1(s32) = COPY %r1
+ %2(s1) = COPY %r2
+ %3(s32) = G_SELECT %2(s1), %0, %1
+ %r0 = COPY %3(s32)
+ BX_RET 14, _, implicit %r0
+
+...
+---
name: test_fadd_s32
# CHECK-LABEL: name: test_fadd_s32
legalized: true
diff --git a/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
index d303e9da8604..a73a7cf8414f 100644
--- a/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
+++ b/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
@@ -19,9 +19,9 @@ entry:
; CHECK-LABEL: isel
; CHECK: push {r4, r5}
-; CHECK: movw r4, #{{\d*}}
; CHECK: movw r12, #0
; CHECK: movt r12, #0
+; CHECK: movw r4, #{{\d*}}
; CHECK: blx r12
; CHECK: sub.w sp, sp, r4
diff --git a/test/CodeGen/ARM/Windows/no-arm-mode.ll b/test/CodeGen/ARM/Windows/no-arm-mode.ll
deleted file mode 100644
index 30353640a4cc..000000000000
--- a/test/CodeGen/ARM/Windows/no-arm-mode.ll
+++ /dev/null
@@ -1,10 +0,0 @@
-; RUN: not llc -mtriple=armv7-windows-itanium -mcpu=cortex-a9 -o /dev/null %s 2>&1 \
-; RUN: | FileCheck %s -check-prefix CHECK-WIN
-
-; RUN: not llc -mtriple=armv7-windows-gnu -mcpu=cortex-a9 -o /dev/null %s 2>&1 \
-; RUN: | FileCheck %s -check-prefix CHECK-GNU
-
-; CHECK-WIN: does not support ARM mode execution
-
-; CHECK-GNU: does not support ARM mode execution
-
diff --git a/test/CodeGen/ARM/Windows/tls.ll b/test/CodeGen/ARM/Windows/tls.ll
index 947e29dfa65c..2c38ad3e58f7 100644
--- a/test/CodeGen/ARM/Windows/tls.ll
+++ b/test/CodeGen/ARM/Windows/tls.ll
@@ -15,11 +15,11 @@ define i32 @f() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -36,11 +36,11 @@ define i32 @e() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -57,11 +57,11 @@ define i32 @d() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -78,11 +78,11 @@ define i32 @c() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -99,11 +99,11 @@ define i32 @b() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -120,11 +120,11 @@ define i16 @a() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
@@ -141,11 +141,11 @@ define i8 @Z() {
; CHECK: mrc p15, #0, [[TEB:r[0-9]]], c13, c0, #2
+; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK: movw [[TLS_INDEX:r[0-9]]], :lower16:_tls_index
; CHECK-NEXT: movt [[TLS_INDEX]], :upper16:_tls_index
; CHECK-NEXT: ldr [[INDEX:r[0-9]]], {{\[}}[[TLS_INDEX]]]
-; CHECK: ldr [[TLS_POINTER:r[0-9]]], {{\[}}[[TEB]], #44]
; CHECK-NEXT: ldr{{.w}} [[TLS:r[0-9]]], {{\[}}[[TLS_POINTER]], [[INDEX]], lsl #2]
; CHECK-NEXT: ldr [[SLOT:r[0-9]]], [[CPI:\.LCPI[0-9]+_[0-9]+]]
diff --git a/test/CodeGen/ARM/alloca.ll b/test/CodeGen/ARM/alloca.ll
index 4a0835a2c0ca..82b6b11ea4b2 100644
--- a/test/CodeGen/ARM/alloca.ll
+++ b/test/CodeGen/ARM/alloca.ll
@@ -2,11 +2,11 @@
define void @f(i32 %a) {
entry:
-; CHECK: add r11, sp, #4
+; CHECK: add r11, sp, #8
%tmp = alloca i8, i32 %a ; <i8*> [#uses=1]
call void @g( i8* %tmp, i32 %a, i32 1, i32 2, i32 3 )
ret void
-; CHECK: sub sp, r11, #4
+; CHECK: sub sp, r11, #8
}
declare void @g(i8*, i32, i32, i32, i32)
diff --git a/test/CodeGen/ARM/arg-copy-elide.ll b/test/CodeGen/ARM/arg-copy-elide.ll
index 739b560b0833..625b57073406 100644
--- a/test/CodeGen/ARM/arg-copy-elide.ll
+++ b/test/CodeGen/ARM/arg-copy-elide.ll
@@ -31,8 +31,8 @@ entry:
; CHECK-LABEL: use_arg:
; CHECK: push {[[csr:[^ ]*]], lr}
-; CHECK: ldr [[csr]], [sp, #8]
; CHECK: add r0, sp, #8
+; CHECK: ldr [[csr]], [sp, #8]
; CHECK: bl addrof_i32
; CHECK: mov r0, [[csr]]
; CHECK: pop {[[csr]], pc}
@@ -50,8 +50,8 @@ entry:
; CHECK: push {r4, r5, r11, lr}
; CHECK: sub sp, sp, #8
; CHECK: ldr r4, [sp, #28]
-; CHECK: ldr r5, [sp, #24]
; CHECK: mov r0, sp
+; CHECK: ldr r5, [sp, #24]
; CHECK: str r4, [sp, #4]
; CHECK: str r5, [sp]
; CHECK: bl addrof_i64
diff --git a/test/CodeGen/ARM/arm-abi-attr.ll b/test/CodeGen/ARM/arm-abi-attr.ll
index 61cb6cefa170..f05e6e788d6f 100644
--- a/test/CodeGen/ARM/arm-abi-attr.ll
+++ b/test/CodeGen/ARM/arm-abi-attr.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm-linux-gnu < %s | FileCheck %s --check-prefix=APCS
+; RUN: llc -mtriple=arm-linux-gnu < %s | FileCheck %s --check-prefix=AAPCS
; RUN: llc -mtriple=arm-linux-gnu -target-abi=apcs < %s | \
; RUN: FileCheck %s --check-prefix=APCS
; RUN: llc -mtriple=arm-linux-gnueabi -target-abi=apcs < %s | \
diff --git a/test/CodeGen/ARM/arm-and-tst-peephole.ll b/test/CodeGen/ARM/arm-and-tst-peephole.ll
index 31691e9468c9..af05392c98a5 100644
--- a/test/CodeGen/ARM/arm-and-tst-peephole.ll
+++ b/test/CodeGen/ARM/arm-and-tst-peephole.ll
@@ -171,8 +171,8 @@ define i32 @test_tst_assessment(i32 %a, i32 %b) {
;
; V8-LABEL: test_tst_assessment:
; V8: @ BB#0:
-; V8-NEXT: lsls r1, r1, #31
; V8-NEXT: and r0, r0, #1
+; V8-NEXT: lsls r1, r1, #31
; V8-NEXT: it ne
; V8-NEXT: subne r0, #1
; V8-NEXT: bx lr
diff --git a/test/CodeGen/ARM/arm-position-independence-jump-table.ll b/test/CodeGen/ARM/arm-position-independence-jump-table.ll
index 790b4f41776e..afc2d38be18c 100644
--- a/test/CodeGen/ARM/arm-position-independence-jump-table.ll
+++ b/test/CodeGen/ARM/arm-position-independence-jump-table.ll
@@ -47,8 +47,8 @@ lab4:
; CHECK-LABEL: jump_table:
-; ARM: lsl r[[R_TAB_IDX:[0-9]+]], r{{[0-9]+}}, #2
; ARM: adr r[[R_TAB_BASE:[0-9]+]], [[LJTI:\.LJTI[0-9]+_[0-9]+]]
+; ARM: lsl r[[R_TAB_IDX:[0-9]+]], r{{[0-9]+}}, #2
; ARM_ABS: ldr pc, [r[[R_TAB_IDX]], r[[R_TAB_BASE]]]
; ARM_PC: ldr r[[R_OFFSET:[0-9]+]], [r[[R_TAB_IDX]], r[[R_TAB_BASE]]]
; ARM_PC: add pc, r[[R_OFFSET]], r[[R_TAB_BASE]]
diff --git a/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll b/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
index 1434f40137b5..7007018dd0b2 100644
--- a/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
+++ b/test/CodeGen/ARM/arm-shrink-wrapping-linux.ll
@@ -45,15 +45,19 @@ target triple = "armv7--linux-gnueabi"
; CHECK: @ %while.cond2
; CHECK: add
; CHECK-NEXT: cmp r{{[0-1]+}}, #1
-; Set the return value.
-; CHECK-NEXT: moveq r0,
-; CHECK-NEXT: popeq
+; Jump to the return block
+; CHECK-NEXT: beq [[RETURN_BLOCK:[.a-zA-Z0-9_]+]]
;
; Use the back edge to check we get the label of the loop right.
; This is to make sure we check the right loop pattern.
; CHECK: @ %while.body24.land.rhs14_crit_edge
; CHECK: cmp r{{[0-9]+}}, #192
; CHECK-NEXT bhs [[LOOP_HEADER]]
+;
+; CHECK: [[RETURN_BLOCK]]:
+; Set the return value.
+; CHECK-NEXT: mov r0,
+; CHECK-NEXT: pop
define fastcc i8* @wrongUseOfPostDominate(i8* readonly %s, i32 %off, i8* readnone %lim) {
entry:
%cmp = icmp sgt i32 %off, -1
diff --git a/test/CodeGen/ARM/atomic-cmpxchg.ll b/test/CodeGen/ARM/atomic-cmpxchg.ll
index e026bae361e1..a136e44fc196 100644
--- a/test/CodeGen/ARM/atomic-cmpxchg.ll
+++ b/test/CodeGen/ARM/atomic-cmpxchg.ll
@@ -70,8 +70,8 @@ entry:
; CHECK-ARMV7-NEXT: ldrexb [[SUCCESS]], [r0]
; CHECK-ARMV7-NEXT: cmp [[SUCCESS]], r1
; CHECK-ARMV7-NEXT: beq [[HEAD]]
-; CHECK-ARMV7-NEXT: clrex
; CHECK-ARMV7-NEXT: mov r0, #0
+; CHECK-ARMV7-NEXT: clrex
; CHECK-ARMV7-NEXT: bx lr
; CHECK-THUMBV7-LABEL: test_cmpxchg_res_i8:
@@ -88,6 +88,6 @@ entry:
; CHECK-THUMBV7-NEXT: ldrexb [[LD:r[0-9]+]], [r0]
; CHECK-THUMBV7-NEXT: cmp [[LD]], [[DESIRED]]
; CHECK-THUMBV7-NEXT: beq [[TRYST:.LBB[0-9_]+]]
-; CHECK-THUMBV7-NEXT: clrex
; CHECK-THUMBV7-NEXT: movs r0, #0
+; CHECK-THUMBV7-NEXT: clrex
; CHECK-THUMBV7-NEXT: bx lr
diff --git a/test/CodeGen/ARM/bool-ext-inc.ll b/test/CodeGen/ARM/bool-ext-inc.ll
index 5f2ba8b109a7..ca9c9ab079db 100644
--- a/test/CodeGen/ARM/bool-ext-inc.ll
+++ b/test/CodeGen/ARM/bool-ext-inc.ll
@@ -16,8 +16,8 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
; CHECK: @ BB#0:
; CHECK-NEXT: vmov.i16 d16, #0x1
; CHECK-NEXT: vmov d17, r0, r1
-; CHECK-NEXT: vmov.i32 q9, #0x1
; CHECK-NEXT: veor d16, d17, d16
+; CHECK-NEXT: vmov.i32 q9, #0x1
; CHECK-NEXT: vmovl.u16 q8, d16
; CHECK-NEXT: vand q8, q8, q9
; CHECK-NEXT: vmov r0, r1, d16
@@ -31,13 +31,13 @@ define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpgt_sext_inc_vec:
; CHECK: @ BB#0:
-; CHECK-NEXT: mov r12, sp
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: vmov.i32 q10, #0x1
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vcge.s32 q8, q8, q9
-; CHECK-NEXT: vand q8, q8, q10
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: mov r0, sp
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vcge.s32 q8, q9, q8
+; CHECK-NEXT: vmov.i32 q9, #0x1
+; CHECK-NEXT: vand q8, q8, q9
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
@@ -50,13 +50,13 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmpne_sext_inc_vec:
; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: mov r12, sp
-; CHECK-NEXT: vmov d19, r2, r3
-; CHECK-NEXT: vmov.i32 q10, #0x1
-; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
-; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vceq.i32 q8, q9, q8
-; CHECK-NEXT: vand q8, q8, q10
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vceq.i32 q8, q8, q9
+; CHECK-NEXT: vmov.i32 q9, #0x1
+; CHECK-NEXT: vand q8, q8, q9
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: vmov r2, r3, d17
; CHECK-NEXT: mov pc, lr
diff --git a/test/CodeGen/ARM/cmpxchg-O0-be.ll b/test/CodeGen/ARM/cmpxchg-O0-be.ll
new file mode 100644
index 000000000000..9e9a93e19b6a
--- /dev/null
+++ b/test/CodeGen/ARM/cmpxchg-O0-be.ll
@@ -0,0 +1,26 @@
+; RUN: llc -verify-machineinstrs -mtriple=armebv8-linux-gnueabi -O0 %s -o - | FileCheck %s
+
+@x = global i64 10, align 8
+@y = global i64 20, align 8
+@z = global i64 20, align 8
+
+; CHECK-LABEL: main:
+; CHECK: ldr [[R2:r[0-9]+]], {{\[}}[[R1:r[0-9]+]]{{\]}}
+; CHECK-NEXT: ldr [[R1]], {{\[}}[[R1]], #4]
+; CHECK: mov [[R4:r[0-9]+]], [[R2]]
+; CHECK-NEXT: mov [[R5:r[0-9]+]], [[R1]]
+; CHECK: ldr [[R2]], {{\[}}[[R1]]{{\]}}
+; CHECK-NEXT: ldr [[R1]], {{\[}}[[R1]], #4]
+; CHECK: mov [[R6:r[0-9]+]], [[R2]]
+; CHECK-NEXT: mov [[R7:r[0-9]+]], [[R1]]
+
+define arm_aapcs_vfpcc i32 @main() #0 {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval, align 4
+ %0 = load i64, i64* @z, align 8
+ %1 = load i64, i64* @x, align 8
+ %2 = cmpxchg i64* @y, i64 %0, i64 %1 seq_cst seq_cst
+ %3 = extractvalue { i64, i1 } %2, 1
+ ret i32 0
+}
diff --git a/test/CodeGen/ARM/cmpxchg-weak.ll b/test/CodeGen/ARM/cmpxchg-weak.ll
index 0d5681aafbcb..29d97fef0606 100644
--- a/test/CodeGen/ARM/cmpxchg-weak.ll
+++ b/test/CodeGen/ARM/cmpxchg-weak.ll
@@ -47,12 +47,12 @@ define i1 @test_cmpxchg_weak_to_bool(i32, i32 *%addr, i32 %desired, i32 %new) {
; CHECK-NEXT: strex [[SUCCESS:r[0-9]+]], r3, [r1]
; CHECK-NEXT: cmp [[SUCCESS]], #0
; CHECK-NEXT: bxne lr
-; CHECK-NEXT: dmb ish
; CHECK-NEXT: mov r0, #1
+; CHECK-NEXT: dmb ish
; CHECK-NEXT: bx lr
; CHECK-NEXT: [[LDFAILBB]]:
-; CHECK-NEXT: clrex
; CHECK-NEXT: mov r0, #0
+; CHECK-NEXT: clrex
; CHECK-NEXT: bx lr
ret i1 %success
diff --git a/test/CodeGen/ARM/code-placement.ll b/test/CodeGen/ARM/code-placement.ll
index b9d90249e9f6..b381aecc69a6 100644
--- a/test/CodeGen/ARM/code-placement.ll
+++ b/test/CodeGen/ARM/code-placement.ll
@@ -38,9 +38,8 @@ entry:
br i1 %0, label %bb5, label %bb.nph15
bb1: ; preds = %bb2.preheader, %bb1
-; CHECK: LBB1_[[BB3:.]]: @ %bb3
; CHECK: LBB1_[[PREHDR:.]]: @ %bb2.preheader
-; CHECK: blt LBB1_[[BB3]]
+; CHECK: blt LBB1_[[BB3:.]]
%indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %bb2.preheader ] ; <i32> [#uses=2]
%sum.08 = phi i32 [ %2, %bb1 ], [ %sum.110, %bb2.preheader ] ; <i32> [#uses=1]
%tmp17 = sub i32 %i.07, %indvar ; <i32> [#uses=1]
@@ -54,7 +53,7 @@ bb1: ; preds = %bb2.preheader, %bb1
bb3: ; preds = %bb1, %bb2.preheader
; CHECK: LBB1_[[BB1:.]]: @ %bb1
; CHECK: bne LBB1_[[BB1]]
-; CHECK: b LBB1_[[BB3]]
+; CHECK: LBB1_[[BB3]]: @ %bb3
%sum.0.lcssa = phi i32 [ %sum.110, %bb2.preheader ], [ %2, %bb1 ] ; <i32> [#uses=2]
%3 = add i32 %pass.011, 1 ; <i32> [#uses=2]
%exitcond18 = icmp eq i32 %3, %passes ; <i1> [#uses=1]
diff --git a/test/CodeGen/ARM/constantfp.ll b/test/CodeGen/ARM/constantfp.ll
index 0b431f47f50b..f825061d1169 100644
--- a/test/CodeGen/ARM/constantfp.ll
+++ b/test/CodeGen/ARM/constantfp.ll
@@ -5,25 +5,25 @@
; RUN: llc -mtriple=thumbv7m -mcpu=cortex-m4 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-NO-XO %s
-; RUN: llc -mtriple=thumbv7m -arm-execute-only -mcpu=cortex-m4 %s -o - \
+; RUN: llc -mtriple=thumbv7m -mattr=+execute-only -mcpu=cortex-m4 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE %s
-; RUN: llc -mtriple=thumbv7meb -arm-execute-only -mcpu=cortex-m4 %s -o - \
+; RUN: llc -mtriple=thumbv7meb -mattr=+execute-only -mcpu=cortex-m4 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE-BE %s
-; RUN: llc -mtriple=thumbv7m -arm-execute-only -mcpu=cortex-m4 -relocation-model=ropi %s -o - \
+; RUN: llc -mtriple=thumbv7m -mattr=+execute-only -mcpu=cortex-m4 -relocation-model=ropi %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-ROPI %s
; RUN: llc -mtriple=thumbv8m.main -mattr=fp-armv8 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-NO-XO %s
-; RUN: llc -mtriple=thumbv8m.main -arm-execute-only -mattr=fp-armv8 %s -o - \
+; RUN: llc -mtriple=thumbv8m.main -mattr=+execute-only -mattr=fp-armv8 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE %s
-; RUN: llc -mtriple=thumbv8m.maineb -arm-execute-only -mattr=fp-armv8 %s -o - \
+; RUN: llc -mtriple=thumbv8m.maineb -mattr=+execute-only -mattr=fp-armv8 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-FLOAT --check-prefix=CHECK-XO-DOUBLE-BE %s
-; RUN: llc -mtriple=thumbv8m.main -arm-execute-only -mattr=fp-armv8 -relocation-model=ropi %s -o - \
+; RUN: llc -mtriple=thumbv8m.main -mattr=+execute-only -mattr=fp-armv8 -relocation-model=ropi %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-XO-ROPI %s
define arm_aapcs_vfpcc float @test_vmov_f32() {
diff --git a/test/CodeGen/ARM/cortex-a57-misched-basic.ll b/test/CodeGen/ARM/cortex-a57-misched-basic.ll
index 2ec50b9d3343..cfbef7bd4293 100644
--- a/test/CodeGen/ARM/cortex-a57-misched-basic.ll
+++ b/test/CodeGen/ARM/cortex-a57-misched-basic.ll
@@ -8,14 +8,14 @@
; CHECK: ********** MI Scheduling **********
; CHECK: foo:BB#0 entry
-; GENERIC: SDIV
+; GENERIC: LDRi12
; GENERIC: Latency : 1
; GENERIC: EORrr
; GENERIC: Latency : 1
-; GENERIC: LDRi12
-; GENERIC: Latency : 4
; GENERIC: ADDrr
; GENERIC: Latency : 1
+; GENERIC: SDIV
+; GENERIC: Latency : 0
; GENERIC: SUBrr
; GENERIC: Latency : 1
diff --git a/test/CodeGen/ARM/cortexr52-misched-basic.ll b/test/CodeGen/ARM/cortexr52-misched-basic.ll
index eb2c29a3a5d1..614157eb0e10 100644
--- a/test/CodeGen/ARM/cortexr52-misched-basic.ll
+++ b/test/CodeGen/ARM/cortexr52-misched-basic.ll
@@ -12,10 +12,10 @@
; GENERIC: Latency : 1
; R52_SCHED: Latency : 3
; CHECK: MLA
-; GENERIC: Latency : 1
+; GENERIC: Latency : 2
; R52_SCHED: Latency : 4
; CHECK: SDIV
-; GENERIC: Latency : 1
+; GENERIC: Latency : 0
; R52_SCHED: Latency : 8
; CHECK: ** Final schedule for BB#0 ***
; GENERIC: EORrr
diff --git a/test/CodeGen/ARM/ctor_order.ll b/test/CodeGen/ARM/ctor_order.ll
index 7fcc8cba0c8f..0cf87d7a97b7 100644
--- a/test/CodeGen/ARM/ctor_order.ll
+++ b/test/CodeGen/ARM/ctor_order.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=dynamic-no-pic | FileCheck %s --check-prefix=DARWIN
; RUN: llc < %s -mtriple=arm-apple-darwin -relocation-model=static | FileCheck %s -check-prefix=DARWIN-STATIC
-; RUN: llc < %s -mtriple=arm-linux-gnu | FileCheck %s -check-prefix=ELF
+; RUN: llc < %s -mtriple=arm-linux-gnu -target-abi=apcs | FileCheck %s -check-prefix=ELF
; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=GNUEABI
; DARWIN: .section __DATA,__mod_init_func,mod_init_funcs
diff --git a/test/CodeGen/ARM/ctors_dtors.ll b/test/CodeGen/ARM/ctors_dtors.ll
index fb94626ab7dd..c097ade3c846 100644
--- a/test/CodeGen/ARM/ctors_dtors.ll
+++ b/test/CodeGen/ARM/ctors_dtors.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=arm-apple-darwin | FileCheck %s -check-prefix=DARWIN
-; RUN: llc < %s -mtriple=arm-linux-gnu | FileCheck %s -check-prefix=ELF
+; RUN: llc < %s -mtriple=arm-linux-gnu -target-abi=apcs | FileCheck %s -check-prefix=ELF
; RUN: llc < %s -mtriple=arm-linux-gnueabi | FileCheck %s -check-prefix=GNUEABI
; DARWIN: .section __DATA,__mod_init_func,mod_init_funcs
diff --git a/test/CodeGen/ARM/cttz.ll b/test/CodeGen/ARM/cttz.ll
index dacfca505931..cba7be583310 100644
--- a/test/CodeGen/ARM/cttz.ll
+++ b/test/CodeGen/ARM/cttz.ll
@@ -40,8 +40,8 @@ define i64 @test_i64(i64 %a) {
; CHECK-LABEL: test_i64:
; CHECK: rbit
; CHECK: rbit
-; CHECK: cmp
; CHECK: clz
+; CHECK: cmp
; CHECK: add
; CHECK: clzne
%tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
@@ -81,8 +81,8 @@ define i64 @test_i64_zero_undef(i64 %a) {
; CHECK-LABEL: test_i64_zero_undef:
; CHECK: rbit
; CHECK: rbit
-; CHECK: cmp
; CHECK: clz
+; CHECK: cmp
; CHECK: add
; CHECK: clzne
%tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
diff --git a/test/CodeGen/ARM/cttz_vector.ll b/test/CodeGen/ARM/cttz_vector.ll
index 9480d75db47a..bed644980415 100644
--- a/test/CodeGen/ARM/cttz_vector.ll
+++ b/test/CodeGen/ARM/cttz_vector.ll
@@ -168,17 +168,17 @@ define void @test_v4i32(<4 x i32>* %p) {
define void @test_v1i64(<1 x i64>* %p) {
; CHECK-LABEL: test_v1i64:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i32 [[D2:d[0-9]+]], #0x0
+; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i64 [[D3:d[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[D2]], [[D2]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D2]]
-; CHECK: vadd.i64 [[D1]], [[D1]], [[D3]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vpaddl.u8 [[D1]], [[D1]]
-; CHECK: vpaddl.u16 [[D1]], [[D1]]
-; CHECK: vpaddl.u32 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: vand [[D2]], [[D1]], [[D2]]
+; CHECK: vadd.i64 [[D2]], [[D2]], [[D3]]
+; CHECK: vcnt.8 [[D2]], [[D2]]
+; CHECK: vpaddl.u8 [[D2]], [[D2]]
+; CHECK: vpaddl.u16 [[D2]], [[D2]]
+; CHECK: vpaddl.u32 [[D2]], [[D2]]
+; CHECK: vstr [[D2]], [r0]
%a = load <1 x i64>, <1 x i64>* %p
%tmp = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %a, i1 false)
store <1 x i64> %tmp, <1 x i64>* %p
@@ -187,17 +187,17 @@ define void @test_v1i64(<1 x i64>* %p) {
define void @test_v2i64(<2 x i64>* %p) {
; CHECK-LABEL: test_v2i64:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i32 [[Q2:q[0-9]+]], #0x0
+; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i64 [[Q3:q[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[Q2]], [[Q2]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vadd.i64 [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u16 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u32 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: vand [[Q2]], [[Q1]], [[Q2]]
+; CHECK: vadd.i64 [[Q2]], [[Q2]], [[Q3]]
+; CHECK: vcnt.8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u16 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u32 [[Q2]], [[Q2]]
+; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
%a = load <2 x i64>, <2 x i64>* %p
%tmp = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 false)
store <2 x i64> %tmp, <2 x i64>* %p
@@ -346,17 +346,17 @@ define void @test_v4i32_zero_undef(<4 x i32>* %p) {
define void @test_v1i64_zero_undef(<1 x i64>* %p) {
; CHECK-LABEL: test_v1i64_zero_undef:
-; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i32 [[D2:d[0-9]+]], #0x0
+; CHECK: vldr [[D1:d[0-9]+]], [r0]
; CHECK: vmov.i64 [[D3:d[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[D2]], [[D2]], [[D1]]
-; CHECK: vand [[D1]], [[D1]], [[D2]]
-; CHECK: vadd.i64 [[D1]], [[D1]], [[D3]]
-; CHECK: vcnt.8 [[D1]], [[D1]]
-; CHECK: vpaddl.u8 [[D1]], [[D1]]
-; CHECK: vpaddl.u16 [[D1]], [[D1]]
-; CHECK: vpaddl.u32 [[D1]], [[D1]]
-; CHECK: vstr [[D1]], [r0]
+; CHECK: vand [[D2]], [[D1]], [[D2]]
+; CHECK: vadd.i64 [[D2]], [[D2]], [[D3]]
+; CHECK: vcnt.8 [[D2]], [[D2]]
+; CHECK: vpaddl.u8 [[D2]], [[D2]]
+; CHECK: vpaddl.u16 [[D2]], [[D2]]
+; CHECK: vpaddl.u32 [[D2]], [[D2]]
+; CHECK: vstr [[D2]], [r0]
%a = load <1 x i64>, <1 x i64>* %p
%tmp = call <1 x i64> @llvm.cttz.v1i64(<1 x i64> %a, i1 true)
store <1 x i64> %tmp, <1 x i64>* %p
@@ -365,17 +365,17 @@ define void @test_v1i64_zero_undef(<1 x i64>* %p) {
define void @test_v2i64_zero_undef(<2 x i64>* %p) {
; CHECK-LABEL: test_v2i64_zero_undef:
-; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i32 [[Q2:q[0-9]+]], #0x0
+; CHECK: vld1.64 {[[D1:d[0-9]+]], [[D2:d[0-9]+]]}, [r0]
; CHECK: vmov.i64 [[Q3:q[0-9]+]], #0xffffffffffffffff
; CHECK: vsub.i64 [[Q2]], [[Q2]], [[Q1:q[0-9]+]]
-; CHECK: vand [[Q1]], [[Q1]], [[Q2]]
-; CHECK: vadd.i64 [[Q1]], [[Q1]], [[Q3]]
-; CHECK: vcnt.8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u8 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u16 [[Q1]], [[Q1]]
-; CHECK: vpaddl.u32 [[Q1]], [[Q1]]
-; CHECK: vst1.64 {[[D1]], [[D2]]}, [r0]
+; CHECK: vand [[Q2]], [[Q1]], [[Q2]]
+; CHECK: vadd.i64 [[Q2]], [[Q2]], [[Q3]]
+; CHECK: vcnt.8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u8 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u16 [[Q2]], [[Q2]]
+; CHECK: vpaddl.u32 [[Q2]], [[Q2]]
+; CHECK: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
%a = load <2 x i64>, <2 x i64>* %p
%tmp = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 true)
store <2 x i64> %tmp, <2 x i64>* %p
diff --git a/test/CodeGen/ARM/cxx-tlscc.ll b/test/CodeGen/ARM/cxx-tlscc.ll
index 6a5aa12ac5a6..6a66c5f197ef 100644
--- a/test/CodeGen/ARM/cxx-tlscc.ll
+++ b/test/CodeGen/ARM/cxx-tlscc.ll
@@ -26,7 +26,7 @@ declare i32 @_tlv_atexit(void (i8*)*, i8*, i8*)
; THUMB-LABEL: _ZTW2sg
; THUMB: push {{.*}}lr
; THUMB: blx
-; THUMB: bne [[TH_end:.?LBB0_[0-9]+]]
+; THUMB: bne{{(.w)?}} [[TH_end:.?LBB0_[0-9]+]]
; THUMB: blx
; THUMB: tlv_atexit
; THUMB: [[TH_end]]:
diff --git a/test/CodeGen/ARM/execute-only-big-stack-frame.ll b/test/CodeGen/ARM/execute-only-big-stack-frame.ll
index 0fe67f9863a5..24c6a06d6af1 100644
--- a/test/CodeGen/ARM/execute-only-big-stack-frame.ll
+++ b/test/CodeGen/ARM/execute-only-big-stack-frame.ll
@@ -1,8 +1,8 @@
-; RUN: llc < %s -mtriple=thumbv7m -arm-execute-only -O0 %s -o - \
+; RUN: llc < %s -mtriple=thumbv7m -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-SUBW-ADDW %s
-; RUN: llc < %s -mtriple=thumbv8m.base -arm-execute-only -O0 %s -o - \
+; RUN: llc < %s -mtriple=thumbv8m.base -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-MOVW-MOVT-ADD %s
-; RUN: llc < %s -mtriple=thumbv8m.main -arm-execute-only -O0 %s -o - \
+; RUN: llc < %s -mtriple=thumbv8m.main -mattr=+execute-only -O0 %s -o - \
; RUN: | FileCheck --check-prefix=CHECK-SUBW-ADDW %s
define i8 @test_big_stack_frame() {
diff --git a/test/CodeGen/ARM/execute-only-section.ll b/test/CodeGen/ARM/execute-only-section.ll
index 6e1973cd0f14..a3313d8c2f73 100644
--- a/test/CodeGen/ARM/execute-only-section.ll
+++ b/test/CodeGen/ARM/execute-only-section.ll
@@ -1,6 +1,6 @@
-; RUN: llc < %s -mtriple=thumbv7m -arm-execute-only %s -o - | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.base -arm-execute-only %s -o - | FileCheck %s
-; RUN: llc < %s -mtriple=thumbv8m.main -arm-execute-only %s -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7m -mattr=+execute-only %s -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8m.base -mattr=+execute-only %s -o - | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv8m.main -mattr=+execute-only %s -o - | FileCheck %s
; CHECK: .section .text,"axy",%progbits,unique,0
; CHECK-NOT: .section
diff --git a/test/CodeGen/ARM/execute-only.ll b/test/CodeGen/ARM/execute-only.ll
index 1f9e8bf2813c..f8c3d279573b 100644
--- a/test/CodeGen/ARM/execute-only.ll
+++ b/test/CodeGen/ARM/execute-only.ll
@@ -1,6 +1,6 @@
-; RUN: llc -mtriple=thumbv8m.base-eabi -arm-execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2BASE %s
-; RUN: llc -mtriple=thumbv7m-eabi -arm-execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
-; RUN: llc -mtriple=thumbv8m.main-eabi -arm-execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
+; RUN: llc -mtriple=thumbv8m.base-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2BASE %s
+; RUN: llc -mtriple=thumbv7m-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
+; RUN: llc -mtriple=thumbv8m.main-eabi -mattr=+execute-only %s -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-T2 %s
@var = global i32 0
diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll
index 9148ac109ae3..257d99d11928 100644
--- a/test/CodeGen/ARM/fp16-promote.ll
+++ b/test/CodeGen/ARM/fp16-promote.ll
@@ -687,8 +687,8 @@ define void @test_maxnan(half* %p) #0 {
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-LIBCALL: bl __aeabi_h2f
; CHECK-VFP-LIBCALL: vbsl
-; CHECK-NOVFP: bic
; CHECK-NOVFP: and
+; CHECK-NOVFP: bic
; CHECK-NOVFP: orr
; CHECK-LIBCALL: bl __aeabi_f2h
define void @test_copysign(half* %p, half* %q) #0 {
@@ -818,25 +818,24 @@ define void @test_fmuladd(half* %p, half* %q, half* %r) #0 {
; CHECK-ALL-LABEL: test_insertelement:
; CHECK-ALL: sub sp, sp, #8
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: mov
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: mov
; CHECK-ALL-DAG: ldrh
; CHECK-ALL-DAG: orr
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
-; CHECK-ALL: ldrh
-; CHECK-ALL: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: ldrh
+; CHECK-ALL-DAG: ldrh
+; CHECK-ALL-DAG: ldrh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
+; CHECK-ALL-DAG: strh
; CHECK-ALL: add sp, sp, #8
define void @test_insertelement(half* %p, <4 x half>* %q, i32 %i) #0 {
%a = load half, half* %p, align 2
diff --git a/test/CodeGen/ARM/fp16-v3.ll b/test/CodeGen/ARM/fp16-v3.ll
index a37f71d9ba88..e84fee2c2e1b 100644
--- a/test/CodeGen/ARM/fp16-v3.ll
+++ b/test/CodeGen/ARM/fp16-v3.ll
@@ -11,8 +11,8 @@ target triple = "armv7a--none-eabi"
; CHECK: vadd.f32 [[SREG5:s[0-9]+]], [[SREG4]], [[SREG1]]
; CHECK-NEXT: vcvtb.f16.f32 [[SREG6:s[0-9]+]], [[SREG5]]
; CHECK-NEXT: vmov [[RREG1:r[0-9]+]], [[SREG6]]
-; CHECK-NEXT: uxth [[RREG2:r[0-9]+]], [[RREG1]]
-; CHECK-NEXT: pkhbt [[RREG3:r[0-9]+]], [[RREG1]], [[RREG1]], lsl #16
+; CHECK-DAG: uxth [[RREG2:r[0-9]+]], [[RREG1]]
+; CHECK-DAG: pkhbt [[RREG3:r[0-9]+]], [[RREG1]], [[RREG1]], lsl #16
; CHECK-DAG: strh [[RREG1]], [r0, #4]
; CHECK-DAG: vmov [[DREG:d[0-9]+]], [[RREG3]], [[RREG2]]
; CHECK-DAG: vst1.32 {[[DREG]][0]}, [r0:32]
diff --git a/test/CodeGen/ARM/ifcvt7.ll b/test/CodeGen/ARM/ifcvt7.ll
index e0d2b7cffb44..ed443a1814e6 100644
--- a/test/CodeGen/ARM/ifcvt7.ll
+++ b/test/CodeGen/ARM/ifcvt7.ll
@@ -5,8 +5,6 @@
define fastcc i32 @CountTree(%struct.quad_struct* %tree) {
; CHECK: cmpeq
-; CHECK: moveq
-; CHECK: popeq
entry:
br label %tailrecurse
diff --git a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
index 74117d3896bd..a633c0291c60 100644
--- a/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
+++ b/test/CodeGen/ARM/illegal-bitfield-loadstore.ll
@@ -55,8 +55,8 @@ define void @i24_and_or(i24* %a) {
define void @i24_insert_bit(i24* %a, i1 zeroext %bit) {
; LE-LABEL: i24_insert_bit:
; LE: @ BB#0:
-; LE-NEXT: ldrh r2, [r0]
; LE-NEXT: mov r3, #255
+; LE-NEXT: ldrh r2, [r0]
; LE-NEXT: orr r3, r3, #57088
; LE-NEXT: and r2, r2, r3
; LE-NEXT: orr r1, r2, r1, lsl #13
@@ -99,8 +99,8 @@ define void @i56_or(i56* %a) {
; BE-NEXT: orr r2, r3, r2, lsl #8
; BE-NEXT: orr r2, r2, r12, lsl #24
; BE-NEXT: orr r2, r2, #384
-; BE-NEXT: lsr r3, r2, #8
; BE-NEXT: strb r2, [r1, #2]
+; BE-NEXT: lsr r3, r2, #8
; BE-NEXT: strh r3, [r1]
; BE-NEXT: bic r1, r12, #255
; BE-NEXT: orr r1, r1, r2, lsr #24
@@ -127,8 +127,8 @@ define void @i56_and_or(i56* %a) {
; BE-NEXT: mov r3, #128
; BE-NEXT: ldrh r2, [r1, #4]!
; BE-NEXT: strb r3, [r1, #2]
-; BE-NEXT: lsl r2, r2, #8
; BE-NEXT: ldr r12, [r0]
+; BE-NEXT: lsl r2, r2, #8
; BE-NEXT: orr r2, r2, r12, lsl #24
; BE-NEXT: orr r2, r2, #384
; BE-NEXT: lsr r3, r2, #8
diff --git a/test/CodeGen/ARM/indirectbr.ll b/test/CodeGen/ARM/indirectbr.ll
index 90defad43a7d..a3ec2a7f3e77 100644
--- a/test/CodeGen/ARM/indirectbr.ll
+++ b/test/CodeGen/ARM/indirectbr.ll
@@ -56,9 +56,11 @@ L2: ; preds = %L3, %bb2
L1: ; preds = %L2, %bb2
%res.3 = phi i32 [ %phitmp, %L2 ], [ 2, %bb2 ] ; <i32> [#uses=1]
; ARM-LABEL: %L1
+; ARM: ldr [[R_NEXTADDR:r[0-9]+]], LCPI
; ARM: ldr [[R1:r[0-9]+]], LCPI
+; ARM: add [[R_NEXTADDR_b:r[0-9]+]], pc, [[R_NEXTADDR]]
; ARM: add [[R1b:r[0-9]+]], pc, [[R1]]
-; ARM: str [[R1b]]
+; ARM: str [[R1b]], {{\[}}[[R_NEXTADDR_b]]]
; THUMB-LABEL: %L1
; THUMB: ldr [[R2:r[0-9]+]], LCPI
diff --git a/test/CodeGen/ARM/jump-table-islands.ll b/test/CodeGen/ARM/jump-table-islands.ll
index 6b4f174c0928..755ca30199ad 100644
--- a/test/CodeGen/ARM/jump-table-islands.ll
+++ b/test/CodeGen/ARM/jump-table-islands.ll
@@ -13,7 +13,7 @@ define %BigInt @test_moved_jumptable(i1 %tst, i32 %sw, %BigInt %l) {
; CHECK: .long LBB{{[0-9]+_[0-9]+}}-[[JUMP_TABLE]]
; CHECK: [[SKIP_TABLE]]:
-; CHECK: add pc, {{r[0-9]+}}, {{r[0-9]+}}
+; CHECK: add pc, {{r[0-9]+|lr}}, {{r[0-9]+|lr}}
br i1 %tst, label %simple, label %complex
simple:
diff --git a/test/CodeGen/ARM/jump-table-tbh.ll b/test/CodeGen/ARM/jump-table-tbh.ll
index 2da8a5fafc40..b3ee68ea0758 100644
--- a/test/CodeGen/ARM/jump-table-tbh.ll
+++ b/test/CodeGen/ARM/jump-table-tbh.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=thumbv7m-linux-gnu -o - %s | FileCheck %s --check-prefix=T2
-; RUN: llc -mtriple=thumbv6m-linux-gnu -o - %s | FileCheck %s --check-prefix=T1
+; RUN: llc -mtriple=thumbv7m-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=T2
+; RUN: llc -mtriple=thumbv6m-linux-gnu -verify-machineinstrs -o - %s | FileCheck %s --check-prefix=T1
declare void @foo(double)
declare i32 @llvm.arm.space(i32, i32)
@@ -10,7 +10,7 @@ define i32 @test_tbh(i1 %tst, i32 %sw, i32 %l) {
; T2-LABEL: test_tbh:
; T2: [[ANCHOR:.LCPI[0-9_]+]]:
; T2: tbh [pc, r{{[0-9]+}}, lsl #1]
-; T2-NEXT: @ BB#1
+; T2-NEXT: @ BB#{{[0-9]+}}
; T2-NEXT: LJTI
; T2-NEXT: .short (.LBB0_[[x:[0-9]+]]-([[ANCHOR]]+4))/2
; T2-NEXT: .short (.LBB0_{{[0-9]+}}-([[ANCHOR]]+4))/2
diff --git a/test/CodeGen/ARM/ldm-stm-i256.ll b/test/CodeGen/ARM/ldm-stm-i256.ll
index 7b4151dabf6d..151c42e0e158 100644
--- a/test/CodeGen/ARM/ldm-stm-i256.ll
+++ b/test/CodeGen/ARM/ldm-stm-i256.ll
@@ -17,22 +17,24 @@ entry:
%add6 = add nsw i256 %or, %d
store i256 %add6, i256* %b, align 8
ret void
- ; CHECK-DAG: ldm r3
; CHECK-DAG: ldm r2
- ; CHECK-DAG: ldr {{.*}}, [r3, #20]
+ ; CHECK-DAG: ldr {{.*}}, [r3]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #4]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #8]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #12]
; CHECK-DAG: ldr {{.*}}, [r3, #16]
- ; CHECK-DAG: ldr {{.*}}, [r3, #28]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #20]
; CHECK-DAG: ldr {{.*}}, [r3, #24]
+ ; CHECK-DAG: ldr {{.*}}, [r3, #28]
; CHECK-DAG: ldr {{.*}}, [r2, #20]
- ; CHECK-DAG: ldr {{.*}}, [r2, #16]
- ; CHECK-DAG: ldr {{.*}}, [r2, #28]
; CHECK-DAG: ldr {{.*}}, [r2, #24]
- ; CHECK-DAG: stmib r0
- ; CHECK-DAG: str {{.*}}, [r0]
+ ; CHECK-DAG: ldr {{.*}}, [r2, #28]
+ ; CHECK-DAG: stm r0
+ ; CHECK-DAG: str {{.*}}, [r0, #20]
; CHECK-DAG: str {{.*}}, [r0, #24]
; CHECK-DAG: str {{.*}}, [r0, #28]
- ; CHECK-DAG: str {{.*}}, [r1]
- ; CHECK-DAG: stmib r1
+ ; CHECK-DAG: stm r1
+ ; CHECK-DAG: str {{.*}}, [r1, #20]
; CHECK-DAG: str {{.*}}, [r1, #24]
; CHECK-DAG: str {{.*}}, [r1, #28]
}
diff --git a/test/CodeGen/ARM/legalize-unaligned-load.ll b/test/CodeGen/ARM/legalize-unaligned-load.ll
index eb4e942f0742..ccf93c3ef55e 100644
--- a/test/CodeGen/ARM/legalize-unaligned-load.ll
+++ b/test/CodeGen/ARM/legalize-unaligned-load.ll
@@ -10,7 +10,7 @@
; CHECK-NOT: str
; CHECK: ldr
; CHECK: str
-; CHECK: bx
+; CHECK: {{bx|pop.*pc}}
define i32 @get_set_complex({ float, float }* noalias nocapture %retptr,
{ i8*, i32 }** noalias nocapture readnone %excinfo,
i8* noalias nocapture readnone %env,
diff --git a/test/CodeGen/ARM/long-setcc.ll b/test/CodeGen/ARM/long-setcc.ll
index f09167ed9e78..1fbc3f2c0838 100644
--- a/test/CodeGen/ARM/long-setcc.ll
+++ b/test/CodeGen/ARM/long-setcc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi < %s | FileCheck %s
define i1 @t1(i64 %x) {
%B = icmp slt i64 %x, 0
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index 3ec5fa41aa6f..cf8396db9db5 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -28,15 +28,15 @@ define i32 @f1(i64 %x, i64 %y) {
define i32 @f2(i64 %x, i64 %y) {
; CHECK-LABEL: f2:
-; CHECK-LE: lsr{{.*}}r2
-; CHECK-LE-NEXT: rsb r3, r2, #32
+; CHECK-LE: rsb r3, r2, #32
+; CHECK-LE-NEXT: lsr{{.*}}r2
; CHECK-LE-NEXT: sub r2, r2, #32
; CHECK-LE-NEXT: orr r0, r0, r1, lsl r3
; CHECK-LE-NEXT: cmp r2, #0
; CHECK-LE-NEXT: asrge r0, r1, r2
-; CHECK-BE: lsr{{.*}}r3
-; CHECK-BE-NEXT: rsb r2, r3, #32
+; CHECK-BE: rsb r2, r3, #32
+; CHECK-BE-NEXT: lsr{{.*}}r3
; CHECK-BE-NEXT: orr r1, r1, r0, lsl r2
; CHECK-BE-NEXT: sub r2, r3, #32
; CHECK-BE-NEXT: cmp r2, #0
@@ -49,15 +49,15 @@ define i32 @f2(i64 %x, i64 %y) {
define i32 @f3(i64 %x, i64 %y) {
; CHECK-LABEL: f3:
-; CHECK-LE: lsr{{.*}}r2
-; CHECK-LE-NEXT: rsb r3, r2, #32
+; CHECK-LE: rsb r3, r2, #32
+; CHECK-LE-NEXT: lsr{{.*}}r2
; CHECK-LE-NEXT: sub r2, r2, #32
; CHECK-LE-NEXT: orr r0, r0, r1, lsl r3
; CHECK-LE-NEXT: cmp r2, #0
; CHECK-LE-NEXT: lsrge r0, r1, r2
-; CHECK-BE: lsr{{.*}}r3
-; CHECK-BE-NEXT: rsb r2, r3, #32
+; CHECK-BE: rsb r2, r3, #32
+; CHECK-BE-NEXT: lsr{{.*}}r3
; CHECK-BE-NEXT: orr r1, r1, r0, lsl r2
; CHECK-BE-NEXT: sub r2, r3, #32
; CHECK-BE-NEXT: cmp r2, #0
diff --git a/test/CodeGen/ARM/misched-fusion-aes.ll b/test/CodeGen/ARM/misched-fusion-aes.ll
index d3558ab4abb0..483f26cc8e00 100644
--- a/test/CodeGen/ARM/misched-fusion-aes.ll
+++ b/test/CodeGen/ARM/misched-fusion-aes.ll
@@ -74,15 +74,16 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QA]]
; CHECK: aese.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QB]]
+; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QC]]
; CHECK: aese.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QD]]
; CHECK: aese.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QE]]
+; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QF]]
-; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aese.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesmc.8 {{q[0-9][0-9]?}}, [[QG]]
; CHECK: aese.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
@@ -159,15 +160,16 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QA]]
; CHECK: aesd.8 [[QB:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QB]]
+; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QC:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QC]]
; CHECK: aesd.8 [[QD:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QD]]
; CHECK: aesd.8 [[QE:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QE]]
+; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QF:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QF]]
-; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
; CHECK: aesd.8 [[QG:q[0-9][0-9]?]], {{q[0-9][0-9]?}}
; CHECK-NEXT: aesimc.8 {{q[0-9][0-9]?}}, [[QG]]
; CHECK: aesd.8 {{q[0-9][0-9]?}}, {{q[0-9][0-9]?}}
diff --git a/test/CodeGen/ARM/select_const.ll b/test/CodeGen/ARM/select_const.ll
index 48fe572bf8a7..23de9c35a5b8 100644
--- a/test/CodeGen/ARM/select_const.ll
+++ b/test/CodeGen/ARM/select_const.ll
@@ -281,16 +281,16 @@ define i64 @opaque_constant1(i1 %cond, i64 %x) {
; CHECK: @ BB#0:
; CHECK-NEXT: .save {r4, lr}
; CHECK-NEXT: push {r4, lr}
-; CHECK-NEXT: ands r12, r0, #1
; CHECK-NEXT: mov lr, #1
+; CHECK-NEXT: ands r12, r0, #1
; CHECK-NEXT: mov r0, #23
-; CHECK-NEXT: eor r3, r3, #1
; CHECK-NEXT: orr lr, lr, #65536
; CHECK-NEXT: mvnne r0, #3
-; CHECK-NEXT: movne r12, #1
; CHECK-NEXT: and r4, r0, lr
-; CHECK-NEXT: eor r2, r2, lr
+; CHECK-NEXT: movne r12, #1
; CHECK-NEXT: subs r0, r4, #1
+; CHECK-NEXT: eor r2, r2, lr
+; CHECK-NEXT: eor r3, r3, #1
; CHECK-NEXT: sbc r1, r12, #0
; CHECK-NEXT: orrs r2, r2, r3
; CHECK-NEXT: movne r0, r4
diff --git a/test/CodeGen/ARM/shift-i64.ll b/test/CodeGen/ARM/shift-i64.ll
index 12cc5fbe03e4..3644afa17ca4 100644
--- a/test/CodeGen/ARM/shift-i64.ll
+++ b/test/CodeGen/ARM/shift-i64.ll
@@ -29,8 +29,8 @@ define i64 @test_shl(i64 %val, i64 %amt) {
; Explanation for lshr is pretty much the reverse of shl.
define i64 @test_lshr(i64 %val, i64 %amt) {
; CHECK-LABEL: test_lshr:
-; CHECK: lsr r0, r0, r2
; CHECK: rsb [[REVERSE_SHIFT:.*]], r2, #32
+; CHECK: lsr r0, r0, r2
; CHECK: orr r0, r0, r1, lsl [[REVERSE_SHIFT]]
; CHECK: sub [[EXTRA_SHIFT:.*]], r2, #32
; CHECK: cmp [[EXTRA_SHIFT]], #0
diff --git a/test/CodeGen/ARM/ssp-data-layout.ll b/test/CodeGen/ARM/ssp-data-layout.ll
index 92fa0809ed2d..39c279eb90d4 100644
--- a/test/CodeGen/ARM/ssp-data-layout.ll
+++ b/test/CodeGen/ARM/ssp-data-layout.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -disable-fp-elim -march=arm -mcpu=cortex-a8 -mtriple arm-linux-gnu -o - | FileCheck %s
+; RUN: llc < %s -disable-fp-elim -march=arm -mcpu=cortex-a8 -mtriple arm-linux-gnu -target-abi=apcs -o - | FileCheck %s
; This test is fairly fragile. The goal is to ensure that "large" stack
; objects are allocated closest to the stack protector (i.e., farthest away
; from the Stack Pointer.) In standard SSP mode this means that large (>=
diff --git a/test/CodeGen/ARM/str_pre-2.ll b/test/CodeGen/ARM/str_pre-2.ll
index 4b8b4c6bca72..1c6c05de2579 100644
--- a/test/CodeGen/ARM/str_pre-2.ll
+++ b/test/CodeGen/ARM/str_pre-2.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -mtriple=armv6-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=armv6-linux-gnu -target-abi=apcs | FileCheck %s
@b = external global i64*
diff --git a/test/CodeGen/ARM/swifterror.ll b/test/CodeGen/ARM/swifterror.ll
index 3fd57c592bfb..b02adf7912b5 100644
--- a/test/CodeGen/ARM/swifterror.ll
+++ b/test/CodeGen/ARM/swifterror.ll
@@ -420,10 +420,10 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
; CHECK-ARMV7-DAG: str r8, [s[[STK1:.*]]]
; CHECK-ARMV7-DAG: str r10, [s[[STK2:.*]]]
; Store arguments.
-; CHECK-ARMV7: mov r6, r3
-; CHECK-ARMV7: mov r4, r2
-; CHECK-ARMV7: mov r11, r1
-; CHECK-ARMV7: mov r5, r0
+; CHECK-ARMV7-DAG: mov r6, r3
+; CHECK-ARMV7-DAG: mov r4, r2
+; CHECK-ARMV7-DAG: mov r11, r1
+; CHECK-ARMV7-DAG: mov r5, r0
; Setup call.
; CHECK-ARMV7: mov r0, #1
; CHECK-ARMV7: mov r1, #2
@@ -435,10 +435,10 @@ define swiftcc void @swifterror_reg_clobber(%swift_error** nocapture %err) {
; Restore original arguments.
; CHECK-ARMV7-DAG: ldr r10, [s[[STK2]]]
; CHECK-ARMV7-DAG: ldr r8, [s[[STK1]]]
-; CHECK-ARMV7: mov r0, r5
-; CHECK-ARMV7: mov r1, r11
-; CHECK-ARMV7: mov r2, r4
-; CHECK-ARMV7: mov r3, r6
+; CHECK-ARMV7-DAG: mov r0, r5
+; CHECK-ARMV7-DAG: mov r1, r11
+; CHECK-ARMV7-DAG: mov r2, r4
+; CHECK-ARMV7-DAG: mov r3, r6
; CHECK-ARMV7: bl _params_in_reg2
; CHECK-ARMV7: pop {r4, r5, r6, r7, r10, r11, pc}
define swiftcc void @params_in_reg(i32, i32, i32, i32, i8* swiftself, %swift_error** nocapture swifterror %err) {
@@ -469,25 +469,25 @@ declare swiftcc void @params_in_reg2(i32, i32, i32, i32, i8* swiftself, %swift_e
; CHECK-ARMV7: mov r8, #0
; CHECK-ARMV7: bl _params_in_reg2
; Restore original arguments.
-; CHECK-ARMV7: ldr r3, [s[[STK2]]]
-; CHECK-ARMV7: ldr r10, [s[[STK1]]]
+; CHECK-ARMV7-DAG: ldr r3, [s[[STK2]]]
+; CHECK-ARMV7-DAG: ldr r10, [s[[STK1]]]
; Store %error_ptr_ref;
-; CHECK-ARMV7: str r8, [s[[STK3:.*]]]
+; CHECK-ARMV7-DAG: str r8, [s[[STK3:.*]]]
; Restore original arguments.
-; CHECK-ARMV7: mov r0, r5
-; CHECK-ARMV7: mov r1, r11
-; CHECK-ARMV7: mov r2, r4
-; CHECK-ARMV7: mov r8, r6
+; CHECK-ARMV7-DAG: mov r0, r5
+; CHECK-ARMV7-DAG: mov r1, r11
+; CHECK-ARMV7-DAG: mov r2, r4
+; CHECK-ARMV7-DAG: mov r8, r6
; CHECK-ARMV7: bl _params_and_return_in_reg2
; Store swifterror return %err;
-; CHECK-ARMV7: str r8, [s[[STK1]]]
+; CHECK-ARMV7-DAG: str r8, [s[[STK1]]]
; Load swifterror value %error_ptr_ref.
-; CHECK-ARMV7: ldr r8, [s[[STK3]]]
+; CHECK-ARMV7-DAG: ldr r8, [s[[STK3]]]
; Save return values.
-; CHECK-ARMV7: mov r4, r0
-; CHECK-ARMV7: mov r5, r1
-; CHECK-ARMV7: mov r6, r2
-; CHECK-ARMV7: mov r11, r3
+; CHECK-ARMV7-DAG: mov r4, r0
+; CHECK-ARMV7-DAG: mov r5, r1
+; CHECK-ARMV7-DAG: mov r6, r2
+; CHECK-ARMV7-DAG: mov r11, r3
; Setup call.
; CHECK-ARMV7: mov r0, #1
; CHECK-ARMV7: mov r1, #2
@@ -496,12 +496,12 @@ declare swiftcc void @params_in_reg2(i32, i32, i32, i32, i8* swiftself, %swift_e
; CHECK-ARMV7: mov r10, #0
; CHECK-ARMV7: bl _params_in_reg2
; Load swifterror %err;
-; CHECK-ARMV7: ldr r8, [s[[STK1]]]
+; CHECK-ARMV7-DAG: ldr r8, [s[[STK1]]]
; Restore return values for returning.
-; CHECK-ARMV7: mov r0, r4
-; CHECK-ARMV7: mov r1, r5
-; CHECK-ARMV7: mov r2, r6
-; CHECK-ARMV7: mov r3, r11
+; CHECK-ARMV7-DAG: mov r0, r4
+; CHECK-ARMV7-DAG: mov r1, r5
+; CHECK-ARMV7-DAG: mov r2, r6
+; CHECK-ARMV7-DAG: mov r3, r11
; CHECK-ARMV7: pop {r4, r5, r6, r7, r10, r11, pc}
define swiftcc { i32, i32, i32, i32} @params_and_return_in_reg(i32, i32, i32, i32, i8* swiftself, %swift_error** nocapture swifterror %err) {
%error_ptr_ref = alloca swifterror %swift_error*, align 8
diff --git a/test/CodeGen/ARM/thumb2-it-block.ll b/test/CodeGen/ARM/thumb2-it-block.ll
index aaefc0a14863..6d93869ec10f 100644
--- a/test/CodeGen/ARM/thumb2-it-block.ll
+++ b/test/CodeGen/ARM/thumb2-it-block.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
-; RUN: llc -mtriple=thumbv8 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 < %s | FileCheck %s
+; RUN: llc -mtriple=thumbv8 < %s | FileCheck %s
; PR11107
define i32 @test(i32 %a, i32 %b) {
diff --git a/test/CodeGen/ARM/vcgt.ll b/test/CodeGen/ARM/vcgt.ll
index c39c939d6c95..1e68ff13699a 100644
--- a/test/CodeGen/ARM/vcgt.ll
+++ b/test/CodeGen/ARM/vcgt.ll
@@ -162,8 +162,8 @@ define <4 x i32> @vacgtQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
; rdar://7923010
define <4 x i32> @vcgt_zext(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: vcgt_zext:
-;CHECK: vmov.i32 [[Q0:q[0-9]+]], #0x1
-;CHECK: vcgt.f32 [[Q1:q[0-9]+]]
+;CHECK-DAG: vmov.i32 [[Q0:q[0-9]+]], #0x1
+;CHECK-DAG: vcgt.f32 [[Q1:q[0-9]+]]
;CHECK: vand [[Q2:q[0-9]+]], [[Q1]], [[Q0]]
%tmp1 = load <4 x float>, <4 x float>* %A
%tmp2 = load <4 x float>, <4 x float>* %B
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 2ef2a0697ec9..8623d2c164ba 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -237,14 +237,14 @@ entry:
; illegal type to a legal type.
define <2 x i8> @test_truncate(<2 x i128> %in) {
; CHECK-LABEL: test_truncate:
-; CHECK: mov [[BASE:r[0-9]+]], sp
-; CHECK-NEXT: vld1.32 {[[REG1:d[0-9]+]][0]}, {{\[}}[[BASE]]:32]
-; CHECK-NEXT: add [[BASE2:r[0-9]+]], [[BASE]], #4
-; CHECK-NEXT: vld1.32 {[[REG1]][1]}, {{\[}}[[BASE2]]:32]
; REG2 Should map on the same Q register as REG1, i.e., REG2 = REG1 - 1, but we
; cannot express that.
-; CHECK-NEXT: vmov.32 [[REG2:d[0-9]+]][0], r0
+; CHECK: vmov.32 [[REG2:d[0-9]+]][0], r0
+; CHECK-NEXT: mov [[BASE:r[0-9]+]], sp
+; CHECK-NEXT: vld1.32 {[[REG1:d[0-9]+]][0]}, {{\[}}[[BASE]]:32]
+; CHECK-NEXT: add [[BASE2:r[0-9]+]], [[BASE]], #4
; CHECK-NEXT: vmov.32 [[REG2]][1], r1
+; CHECK-NEXT: vld1.32 {[[REG1]][1]}, {{\[}}[[BASE2]]:32]
; The Q register used here should match floor(REG1/2), but we cannot express that.
; CHECK-NEXT: vmovn.i64 [[RES:d[0-9]+]], q{{[0-9]+}}
; CHECK-NEXT: vmov r0, r1, [[RES]]
diff --git a/test/CodeGen/ARM/vext.ll b/test/CodeGen/ARM/vext.ll
index 5742dc314978..5b524145be76 100644
--- a/test/CodeGen/ARM/vext.ll
+++ b/test/CodeGen/ARM/vext.ll
@@ -182,9 +182,9 @@ define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_interleaved:
; CHECK: @ BB#0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vext.16 d16, d16, d17, #3
; CHECK-NEXT: vorr d17, d16, d16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
; CHECK-NEXT: vuzp.16 d16, d17
; CHECK-NEXT: vzip.16 d16, d18
; CHECK-NEXT: vmov r0, r1, d16
@@ -217,16 +217,16 @@ define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
; CHECK-LABEL: test_multisource:
; CHECK: @ BB#0:
; CHECK-NEXT: mov r1, r0
-; CHECK-NEXT: add r2, r0, #32
-; CHECK-NEXT: add r0, r0, #48
+; CHECK-NEXT: add r2, r0, #48
+; CHECK-NEXT: add r0, r0, #32
; CHECK-NEXT: vld1.16 {d16, d17}, [r1:128]!
-; CHECK-NEXT: vld1.64 {d20, d21}, [r2:128]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r0:128]
-; CHECK-NEXT: vld1.64 {d22, d23}, [r1:128]
+; CHECK-NEXT: vld1.64 {d20, d21}, [r0:128]
; CHECK-NEXT: vorr d24, d20, d20
+; CHECK-NEXT: vld1.64 {d18, d19}, [r2:128]
+; CHECK-NEXT: vld1.64 {d22, d23}, [r1:128]
; CHECK-NEXT: vzip.16 d24, d18
-; CHECK-NEXT: vext.16 d18, d20, d24, #2
; CHECK-NEXT: vtrn.16 q8, q11
+; CHECK-NEXT: vext.16 d18, d20, d24, #2
; CHECK-NEXT: vext.16 d16, d18, d16, #2
; CHECK-NEXT: vext.16 d16, d16, d16, #2
; CHECK-NEXT: vmov r0, r1, d16
@@ -259,24 +259,24 @@ define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_illegal:
; CHECK: @ BB#0:
-; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
-; CHECK-NEXT: vmov.u16 r1, d16[0]
-; CHECK-NEXT: vmov.u16 r0, d17[3]
-; CHECK-NEXT: vorr d22, d16, d16
-; CHECK-NEXT: vorr d23, d16, d16
-; CHECK-NEXT: vmov.16 d20[0], r1
-; CHECK-NEXT: vuzp.16 d22, d23
-; CHECK-NEXT: vmov.u16 r1, d17[1]
-; CHECK-NEXT: vmov.16 d20[1], r0
-; CHECK-NEXT: vuzp.16 d22, d18
-; CHECK-NEXT: vmov.16 d20[2], r1
-; CHECK-NEXT: vmov.u16 r0, d19[1]
-; CHECK-NEXT: vext.16 d21, d16, d18, #3
-; CHECK-NEXT: vmov.16 d20[3], r0
-; CHECK-NEXT: vmov r0, r1, d20
-; CHECK-NEXT: vmov r2, r3, d21
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vorr d22, d16, d16
+; CHECK-NEXT: vmov.u16 r0, d16[0]
+; CHECK-NEXT: vorr d23, d16, d16
+; CHECK-NEXT: vmov.u16 r2, d17[3]
+; CHECK-NEXT: vmov.u16 r3, d17[1]
+; CHECK-NEXT: vld1.64 {d18, d19}, [r1]
+; CHECK-NEXT: vmov.u16 r1, d19[1]
+; CHECK-NEXT: vuzp.16 d22, d23
+; CHECK-NEXT: vuzp.16 d22, d18
+; CHECK-NEXT: vmov.16 d20[0], r0
+; CHECK-NEXT: vmov.16 d20[1], r2
+; CHECK-NEXT: vmov.16 d20[2], r3
+; CHECK-NEXT: vmov.16 d20[3], r1
+; CHECK-NEXT: vext.16 d21, d16, d18, #3
+; CHECK-NEXT: vmov r0, r1, d20
+; CHECK-NEXT: vmov r2, r3, d21
+; CHECK-NEXT: mov pc, lr
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
@@ -289,10 +289,10 @@ define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>
; CHECK-LABEL: test_elem_mismatch:
; CHECK: @ BB#0:
; CHECK-NEXT: vld1.64 {d16, d17}, [r0:128]
-; CHECK-NEXT: vmov.32 r2, d16[0]
-; CHECK-NEXT: vmov.32 r0, d17[0]
-; CHECK-NEXT: vmov.16 d16[0], r2
-; CHECK-NEXT: vmov.16 d16[1], r0
+; CHECK-NEXT: vmov.32 r0, d16[0]
+; CHECK-NEXT: vmov.32 r2, d17[0]
+; CHECK-NEXT: vmov.16 d16[0], r0
+; CHECK-NEXT: vmov.16 d16[1], r2
; CHECK-NEXT: vstr d16, [r1]
; CHECK-NEXT: mov pc, lr
%tmp0 = load <2 x i64>, <2 x i64>* %src, align 16
diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll
index 03c0354aa1df..8fa5113d8a31 100644
--- a/test/CodeGen/ARM/vfp.ll
+++ b/test/CodeGen/ARM/vfp.ll
@@ -40,8 +40,8 @@ define void @test_add(float* %P, double* %D) {
define void @test_ext_round(float* %P, double* %D) {
;CHECK-LABEL: test_ext_round:
%a = load float, float* %P ; <float> [#uses=1]
-;CHECK: vcvt.f64.f32
-;CHECK: vcvt.f32.f64
+;CHECK-DAG: vcvt.f64.f32
+;CHECK-DAG: vcvt.f32.f64
%b = fpext float %a to double ; <double> [#uses=1]
%A = load double, double* %D ; <double> [#uses=1]
%B = fptrunc double %A to float ; <float> [#uses=1]
diff --git a/test/CodeGen/ARM/vld1.ll b/test/CodeGen/ARM/vld1.ll
index bdb384769741..c50e0beea4d1 100644
--- a/test/CodeGen/ARM/vld1.ll
+++ b/test/CodeGen/ARM/vld1.ll
@@ -78,7 +78,7 @@ define <16 x i8> @vld1Qi8(i8* %A) nounwind {
;Check for a post-increment updating load.
define <16 x i8> @vld1Qi8_update(i8** %ptr) nounwind {
;CHECK-LABEL: vld1Qi8_update:
-;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+}}:64]!
+;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+|lr}}:64]!
%A = load i8*, i8** %ptr
%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8.p0i8(i8* %A, i32 8)
%tmp2 = getelementptr i8, i8* %A, i32 16
diff --git a/test/CodeGen/ARM/vld2.ll b/test/CodeGen/ARM/vld2.ll
index 1ca16587bd91..6ef37c1b6678 100644
--- a/test/CodeGen/ARM/vld2.ll
+++ b/test/CodeGen/ARM/vld2.ll
@@ -14,7 +14,7 @@
define <8 x i8> @vld2i8(i8* %A) nounwind {
;CHECK-LABEL: vld2i8:
;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld2.8 {d16, d17}, [r0:64]
+;CHECK: vld2.8 {d16, d17}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8.p0i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
@@ -25,7 +25,7 @@ define <8 x i8> @vld2i8(i8* %A) nounwind {
define <4 x i16> @vld2i16(i16* %A) nounwind {
;CHECK-LABEL: vld2i16:
;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld2.16 {d16, d17}, [r0:128]
+;CHECK: vld2.16 {d16, d17}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16.p0i8(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 0
@@ -59,7 +59,7 @@ define <2 x float> @vld2f(float* %A) nounwind {
;Check for a post-increment updating load.
define <2 x float> @vld2f_update(float** %ptr) nounwind {
;CHECK-LABEL: vld2f_update:
-;CHECK: vld2.32 {d16, d17}, [r1]!
+;CHECK: vld2.32 {d16, d17}, [{{r[0-9]+|lr}}]!
%A = load float*, float** %ptr
%tmp0 = bitcast float* %A to i8*
%tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32.p0i8(i8* %tmp0, i32 1)
@@ -74,7 +74,7 @@ define <2 x float> @vld2f_update(float** %ptr) nounwind {
define <1 x i64> @vld2i64(i64* %A) nounwind {
;CHECK-LABEL: vld2i64:
;Check the alignment value. Max for this instruction is 128 bits:
-;CHECK: vld1.64 {d16, d17}, [r0:128]
+;CHECK: vld1.64 {d16, d17}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x2_t @llvm.arm.neon.vld2.v1i64.p0i8(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int64x1x2_t %tmp1, 0
@@ -86,7 +86,7 @@ define <1 x i64> @vld2i64(i64* %A) nounwind {
define <16 x i8> @vld2Qi8(i8* %A) nounwind {
;CHECK-LABEL: vld2Qi8:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld2.8 {d16, d17, d18, d19}, [r0:64]
+;CHECK: vld2.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8.p0i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1
@@ -97,7 +97,7 @@ define <16 x i8> @vld2Qi8(i8* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld2Qi8_update:
-;CHECK: vld2.8 {d16, d17, d18, d19}, [r2:128], r1
+;CHECK: vld2.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128], r1
%A = load i8*, i8** %ptr
%tmp1 = call %struct.__neon_int8x16x2_t @llvm.arm.neon.vld2.v16i8.p0i8(i8* %A, i32 16)
%tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
@@ -111,7 +111,7 @@ define <16 x i8> @vld2Qi8_update(i8** %ptr, i32 %inc) nounwind {
define <8 x i16> @vld2Qi16(i16* %A) nounwind {
;CHECK-LABEL: vld2Qi16:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld2.16 {d16, d17, d18, d19}, [r0:128]
+;CHECK: vld2.16 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2.v8i16.p0i8(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int16x8x2_t %tmp1, 0
@@ -123,7 +123,7 @@ define <8 x i16> @vld2Qi16(i16* %A) nounwind {
define <4 x i32> @vld2Qi32(i32* %A) nounwind {
;CHECK-LABEL: vld2Qi32:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld2.32 {d16, d17, d18, d19}, [r0:256]
+;CHECK: vld2.32 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32.p0i8(i8* %tmp0, i32 64)
%tmp2 = extractvalue %struct.__neon_int32x4x2_t %tmp1, 0
diff --git a/test/CodeGen/ARM/vld3.ll b/test/CodeGen/ARM/vld3.ll
index c3e8ee8691fd..0eaad0f90035 100644
--- a/test/CodeGen/ARM/vld3.ll
+++ b/test/CodeGen/ARM/vld3.ll
@@ -15,7 +15,7 @@
define <8 x i8> @vld3i8(i8* %A) nounwind {
;CHECK-LABEL: vld3i8:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld3.8 {d16, d17, d18}, [r0:64]
+;CHECK: vld3.8 {d16, d17, d18}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8.p0i8(i8* %A, i32 32)
%tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2
@@ -37,7 +37,7 @@ define <4 x i16> @vld3i16(i16* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <4 x i16> @vld3i16_update(i16** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld3i16_update:
-;CHECK: vld3.16 {d16, d17, d18}, [{{r[0-9]+}}], {{r[0-9]+}}
+;CHECK: vld3.16 {d16, d17, d18}, [{{r[0-9]+|lr}}], {{r[0-9]+|lr}}
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16.p0i8(i8* %tmp0, i32 1)
@@ -74,7 +74,7 @@ define <2 x float> @vld3f(float* %A) nounwind {
define <1 x i64> @vld3i64(i64* %A) nounwind {
;CHECK-LABEL: vld3i64:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld1.64 {d16, d17, d18}, [r0:64]
+;CHECK: vld1.64 {d16, d17, d18}, [{{r[0-9]+|lr}}:64]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64.p0i8(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 0
@@ -85,7 +85,7 @@ define <1 x i64> @vld3i64(i64* %A) nounwind {
define <1 x i64> @vld3i64_update(i64** %ptr, i64* %A) nounwind {
;CHECK-LABEL: vld3i64_update:
-;CHECK: vld1.64 {d16, d17, d18}, [r1:64]!
+;CHECK: vld1.64 {d16, d17, d18}, [{{r[0-9]+|lr}}:64]!
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64.p0i8(i8* %tmp0, i32 16)
%tmp5 = getelementptr i64, i64* %A, i32 3
@@ -99,8 +99,8 @@ define <1 x i64> @vld3i64_update(i64** %ptr, i64* %A) nounwind {
define <16 x i8> @vld3Qi8(i8* %A) nounwind {
;CHECK-LABEL: vld3Qi8:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld3.8 {d16, d18, d20}, [r0:64]!
-;CHECK: vld3.8 {d17, d19, d21}, [r0:64]
+;CHECK: vld3.8 {d16, d18, d20}, [{{r[0-9]+|lr}}:64]!
+;CHECK: vld3.8 {d17, d19, d21}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x16x3_t @llvm.arm.neon.vld3.v16i8.p0i8(i8* %A, i32 32)
%tmp2 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x3_t %tmp1, 2
@@ -135,8 +135,8 @@ define <4 x i32> @vld3Qi32(i32* %A) nounwind {
;Check for a post-increment updating load.
define <4 x i32> @vld3Qi32_update(i32** %ptr) nounwind {
;CHECK-LABEL: vld3Qi32_update:
-;CHECK: vld3.32 {d16, d18, d20}, [r[[R:[0-9]+]]]!
-;CHECK: vld3.32 {d17, d19, d21}, [r[[R]]]!
+;CHECK: vld3.32 {d16, d18, d20}, {{\[}}[[R:r[0-9]+|lr]]]!
+;CHECK: vld3.32 {d17, d19, d21}, {{\[}}[[R]]]!
%A = load i32*, i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x4x3_t @llvm.arm.neon.vld3.v4i32.p0i8(i8* %tmp0, i32 1)
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index 10570039a9d2..5663e6d41f02 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -14,7 +14,7 @@
define <8 x i8> @vld4i8(i8* %A) nounwind {
;CHECK-LABEL: vld4i8:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.8 {d16, d17, d18, d19}, [r0:64]
+;CHECK: vld4.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:64]
%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8* %A, i32 8)
%tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
@@ -25,7 +25,7 @@ define <8 x i8> @vld4i8(i8* %A) nounwind {
;Check for a post-increment updating load with register increment.
define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld4i8_update:
-;CHECK: vld4.8 {d16, d17, d18, d19}, [r2:128], r1
+;CHECK: vld4.8 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128], r1
%A = load i8*, i8** %ptr
%tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8* %A, i32 16)
%tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
@@ -39,7 +39,7 @@ define <8 x i8> @vld4i8_update(i8** %ptr, i32 %inc) nounwind {
define <4 x i16> @vld4i16(i16* %A) nounwind {
;CHECK-LABEL: vld4i16:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.16 {d16, d17, d18, d19}, [r0:128]
+;CHECK: vld4.16 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:128]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16.p0i8(i8* %tmp0, i32 16)
%tmp2 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 0
@@ -51,7 +51,7 @@ define <4 x i16> @vld4i16(i16* %A) nounwind {
define <2 x i32> @vld4i32(i32* %A) nounwind {
;CHECK-LABEL: vld4i32:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.32 {d16, d17, d18, d19}, [r0:256]
+;CHECK: vld4.32 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32.p0i8(i8* %tmp0, i32 32)
%tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
@@ -74,7 +74,7 @@ define <2 x float> @vld4f(float* %A) nounwind {
define <1 x i64> @vld4i64(i64* %A) nounwind {
;CHECK-LABEL: vld4i64:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld1.64 {d16, d17, d18, d19}, [r0:256]
+;CHECK: vld1.64 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64.p0i8(i8* %tmp0, i32 64)
%tmp2 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 0
@@ -85,7 +85,7 @@ define <1 x i64> @vld4i64(i64* %A) nounwind {
define <1 x i64> @vld4i64_update(i64** %ptr, i64* %A) nounwind {
;CHECK-LABEL: vld4i64_update:
-;CHECK: vld1.64 {d16, d17, d18, d19}, [r1:256]!
+;CHECK: vld1.64 {d16, d17, d18, d19}, [{{r[0-9]+|lr}}:256]!
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64.p0i8(i8* %tmp0, i32 64)
%tmp5 = getelementptr i64, i64* %A, i32 4
@@ -99,8 +99,8 @@ define <1 x i64> @vld4i64_update(i64** %ptr, i64* %A) nounwind {
define <16 x i8> @vld4Qi8(i8* %A) nounwind {
;CHECK-LABEL: vld4Qi8:
;Check the alignment value. Max for this instruction is 256 bits:
-;CHECK: vld4.8 {d16, d18, d20, d22}, [r0:256]!
-;CHECK: vld4.8 {d17, d19, d21, d23}, [r0:256]
+;CHECK: vld4.8 {d16, d18, d20, d22}, [{{r[0-9]+|lr}}:256]!
+;CHECK: vld4.8 {d17, d19, d21, d23}, [{{r[0-9]+|lr}}:256]
%tmp1 = call %struct.__neon_int8x16x4_t @llvm.arm.neon.vld4.v16i8.p0i8(i8* %A, i32 64)
%tmp2 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x4_t %tmp1, 2
@@ -111,8 +111,8 @@ define <16 x i8> @vld4Qi8(i8* %A) nounwind {
define <8 x i16> @vld4Qi16(i16* %A) nounwind {
;CHECK-LABEL: vld4Qi16:
;Check for no alignment specifier.
-;CHECK: vld4.16 {d16, d18, d20, d22}, [r0]!
-;CHECK: vld4.16 {d17, d19, d21, d23}, [r0]
+;CHECK: vld4.16 {d16, d18, d20, d22}, [{{r[0-9]+|lr}}]!
+;CHECK: vld4.16 {d17, d19, d21, d23}, [{{r[0-9]+|lr}}]
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16.p0i8(i8* %tmp0, i32 1)
%tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
@@ -124,8 +124,8 @@ define <8 x i16> @vld4Qi16(i16* %A) nounwind {
;Check for a post-increment updating load.
define <8 x i16> @vld4Qi16_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld4Qi16_update:
-;CHECK: vld4.16 {d16, d18, d20, d22}, [r1:64]!
-;CHECK: vld4.16 {d17, d19, d21, d23}, [r1:64]!
+;CHECK: vld4.16 {d16, d18, d20, d22}, [{{r[0-9]+|lr}}:64]!
+;CHECK: vld4.16 {d17, d19, d21, d23}, [{{r[0-9]+|lr}}:64]!
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call %struct.__neon_int16x8x4_t @llvm.arm.neon.vld4.v8i16.p0i8(i8* %tmp0, i32 8)
diff --git a/test/CodeGen/ARM/vlddup.ll b/test/CodeGen/ARM/vlddup.ll
index 71ca0f791524..72f9434fd10a 100644
--- a/test/CodeGen/ARM/vlddup.ll
+++ b/test/CodeGen/ARM/vlddup.ll
@@ -3,7 +3,7 @@
define <8 x i8> @vld1dupi8(i8* %A) nounwind {
;CHECK-LABEL: vld1dupi8:
;Check the (default) alignment value.
-;CHECK: vld1.8 {d16[]}, [r0]
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}]
%tmp1 = load i8, i8* %A, align 8
%tmp2 = insertelement <8 x i8> undef, i8 %tmp1, i32 0
%tmp3 = shufflevector <8 x i8> %tmp2, <8 x i8> undef, <8 x i32> zeroinitializer
@@ -13,7 +13,7 @@ define <8 x i8> @vld1dupi8(i8* %A) nounwind {
define <8 x i8> @vld1dupi8_preinc(i8** noalias nocapture %a, i32 %b) nounwind {
entry:
;CHECK-LABEL: vld1dupi8_preinc:
-;CHECK: vld1.8 {d16[]}, [r1]
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}]
%0 = load i8*, i8** %a, align 4
%add.ptr = getelementptr inbounds i8, i8* %0, i32 %b
%1 = load i8, i8* %add.ptr, align 1
@@ -26,7 +26,7 @@ entry:
define <8 x i8> @vld1dupi8_postinc_fixed(i8** noalias nocapture %a) nounwind {
entry:
;CHECK-LABEL: vld1dupi8_postinc_fixed:
-;CHECK: vld1.8 {d16[]}, [r1]!
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}]!
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <8 x i8> undef, i8 %1, i32 0
@@ -39,7 +39,7 @@ entry:
define <8 x i8> @vld1dupi8_postinc_register(i8** noalias nocapture %a, i32 %n) nounwind {
entry:
;CHECK-LABEL: vld1dupi8_postinc_register:
-;CHECK: vld1.8 {d16[]}, [r2], r1
+;CHECK: vld1.8 {d16[]}, [{{r[0-9]+|lr}}], r1
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <8 x i8> undef, i8 %1, i32 0
@@ -52,7 +52,7 @@ entry:
define <16 x i8> @vld1dupqi8_preinc(i8** noalias nocapture %a, i32 %b) nounwind {
entry:
;CHECK-LABEL: vld1dupqi8_preinc:
-;CHECK: vld1.8 {d16[], d17[]}, [r1]
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%0 = load i8*, i8** %a, align 4
%add.ptr = getelementptr inbounds i8, i8* %0, i32 %b
%1 = load i8, i8* %add.ptr, align 1
@@ -65,7 +65,7 @@ entry:
define <16 x i8> @vld1dupqi8_postinc_fixed(i8** noalias nocapture %a) nounwind {
entry:
;CHECK-LABEL: vld1dupqi8_postinc_fixed:
-;CHECK: vld1.8 {d16[], d17[]}, [r1]!
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]!
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <16 x i8> undef, i8 %1, i32 0
@@ -78,7 +78,7 @@ entry:
define <16 x i8> @vld1dupqi8_postinc_register(i8** noalias nocapture %a, i32 %n) nounwind {
entry:
;CHECK-LABEL: vld1dupqi8_postinc_register:
-;CHECK: vld1.8 {d16[], d17[]}, [r2], r1
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}], r1
%0 = load i8*, i8** %a, align 4
%1 = load i8, i8* %0, align 1
%2 = insertelement <16 x i8> undef, i8 %1, i32 0
@@ -91,7 +91,7 @@ entry:
define <4 x i16> @vld1dupi16(i16* %A) nounwind {
;CHECK-LABEL: vld1dupi16:
;Check the alignment value. Max for this instruction is 16 bits:
-;CHECK: vld1.16 {d16[]}, [r0:16]
+;CHECK: vld1.16 {d16[]}, [{{r[0-9]+|lr}}:16]
%tmp1 = load i16, i16* %A, align 8
%tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
%tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -100,7 +100,7 @@ define <4 x i16> @vld1dupi16(i16* %A) nounwind {
define <4 x i16> @vld1dupi16_misaligned(i16* %A) nounwind {
;CHECK-LABEL: vld1dupi16_misaligned:
-;CHECK: vld1.16 {d16[]}, [r0]
+;CHECK: vld1.16 {d16[]}, [{{r[0-9]+|lr}}]
%tmp1 = load i16, i16* %A, align 1
%tmp2 = insertelement <4 x i16> undef, i16 %tmp1, i32 0
%tmp3 = shufflevector <4 x i16> %tmp2, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -110,7 +110,7 @@ define <4 x i16> @vld1dupi16_misaligned(i16* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i16> @load_i16_dup_zext(i8* %A) nounwind {
;CHECK-LABEL: load_i16_dup_zext:
-;CHECK: ldrb r0, [r0]
+;CHECK: ldrb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.16 d16, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = zext i8 %tmp1 to i16
@@ -122,7 +122,7 @@ define <4 x i16> @load_i16_dup_zext(i8* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i16> @load_i16_dup_sext(i8* %A) nounwind {
;CHECK-LABEL: load_i16_dup_sext:
-;CHECK: ldrsb r0, [r0]
+;CHECK: ldrsb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.16 d16, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = sext i8 %tmp1 to i16
@@ -134,7 +134,7 @@ define <4 x i16> @load_i16_dup_sext(i8* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <8 x i16> @load_i16_dupq_zext(i8* %A) nounwind {
;CHECK-LABEL: load_i16_dupq_zext:
-;CHECK: ldrb r0, [r0]
+;CHECK: ldrb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.16 q8, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = zext i8 %tmp1 to i16
@@ -146,7 +146,7 @@ define <8 x i16> @load_i16_dupq_zext(i8* %A) nounwind {
define <2 x i32> @vld1dupi32(i32* %A) nounwind {
;CHECK-LABEL: vld1dupi32:
;Check the alignment value. Max for this instruction is 32 bits:
-;CHECK: vld1.32 {d16[]}, [r0:32]
+;CHECK: vld1.32 {d16[]}, [{{r[0-9]+|lr}}:32]
%tmp1 = load i32, i32* %A, align 8
%tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
%tmp3 = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
@@ -156,7 +156,7 @@ define <2 x i32> @vld1dupi32(i32* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i32> @load_i32_dup_zext(i8* %A) nounwind {
;CHECK-LABEL: load_i32_dup_zext:
-;CHECK: ldrb r0, [r0]
+;CHECK: ldrb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.32 q8, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = zext i8 %tmp1 to i32
@@ -168,7 +168,7 @@ define <4 x i32> @load_i32_dup_zext(i8* %A) nounwind {
; This sort of looks like a vld1dup, but there's an extension in the way.
define <4 x i32> @load_i32_dup_sext(i8* %A) nounwind {
;CHECK-LABEL: load_i32_dup_sext:
-;CHECK: ldrsb r0, [r0]
+;CHECK: ldrsb r0, [{{r[0-9]+|lr}}]
;CHECK-NEXT: vdup.32 q8, r0
%tmp1 = load i8, i8* %A, align 1
%tmp2 = sext i8 %tmp1 to i32
@@ -179,7 +179,7 @@ define <4 x i32> @load_i32_dup_sext(i8* %A) nounwind {
define <2 x float> @vld1dupf(float* %A) nounwind {
;CHECK-LABEL: vld1dupf:
-;CHECK: vld1.32 {d16[]}, [r0:32]
+;CHECK: vld1.32 {d16[]}, [{{r[0-9]+|lr}}:32]
%tmp0 = load float, float* %A
%tmp1 = insertelement <2 x float> undef, float %tmp0, i32 0
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
@@ -189,7 +189,7 @@ define <2 x float> @vld1dupf(float* %A) nounwind {
define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
;CHECK-LABEL: vld1dupQi8:
;Check the (default) alignment value.
-;CHECK: vld1.8 {d16[], d17[]}, [r0]
+;CHECK: vld1.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%tmp1 = load i8, i8* %A, align 8
%tmp2 = insertelement <16 x i8> undef, i8 %tmp1, i32 0
%tmp3 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <16 x i32> zeroinitializer
@@ -198,7 +198,7 @@ define <16 x i8> @vld1dupQi8(i8* %A) nounwind {
define <4 x float> @vld1dupQf(float* %A) nounwind {
;CHECK-LABEL: vld1dupQf:
-;CHECK: vld1.32 {d16[], d17[]}, [r0:32]
+;CHECK: vld1.32 {d16[], d17[]}, [{{r[0-9]+|lr}}:32]
%tmp0 = load float, float* %A
%tmp1 = insertelement <4 x float> undef, float %tmp0, i32 0
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
@@ -212,7 +212,7 @@ define <4 x float> @vld1dupQf(float* %A) nounwind {
define <8 x i8> @vld2dupi8(i8* %A) nounwind {
;CHECK-LABEL: vld2dupi8:
;Check the (default) alignment value.
-;CHECK: vld2.8 {d16[], d17[]}, [r0]
+;CHECK: vld2.8 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%tmp0 = tail call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8.p0i8(i8* %A, <8 x i8> undef, <8 x i8> undef, i32 0, i32 1)
%tmp1 = extractvalue %struct.__neon_int8x8x2_t %tmp0, 0
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> zeroinitializer
@@ -283,7 +283,7 @@ define <4 x i16> @vld2dupi16(i8* %A) nounwind {
;CHECK-LABEL: vld2dupi16:
;Check that a power-of-two alignment smaller than the total size of the memory
;being loaded is ignored.
-;CHECK: vld2.16 {d16[], d17[]}, [r0]
+;CHECK: vld2.16 {d16[], d17[]}, [{{r[0-9]+|lr}}]
%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16.p0i8(i8* %A, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
%tmp1 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 0
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -296,7 +296,7 @@ define <4 x i16> @vld2dupi16(i8* %A) nounwind {
;Check for a post-increment updating load.
define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld2dupi16_update:
-;CHECK: vld2.16 {d16[], d17[]}, [r1]!
+;CHECK: vld2.16 {d16[], d17[]}, [{{r[0-9]+|lr}}]!
%A = load i16*, i16** %ptr
%A2 = bitcast i16* %A to i8*
%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16.p0i8(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
@@ -313,7 +313,7 @@ define <4 x i16> @vld2dupi16_update(i16** %ptr) nounwind {
define <4 x i16> @vld2dupi16_odd_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld2dupi16_odd_update:
;CHECK: mov [[INC:r[0-9]+]], #6
-;CHECK: vld2.16 {d16[], d17[]}, [r1], [[INC]]
+;CHECK: vld2.16 {d16[], d17[]}, [{{r[0-9]+|lr}}], [[INC]]
%A = load i16*, i16** %ptr
%A2 = bitcast i16* %A to i8*
%tmp0 = tail call %struct.__neon_int4x16x2_t @llvm.arm.neon.vld2lane.v4i16.p0i8(i8* %A2, <4 x i16> undef, <4 x i16> undef, i32 0, i32 2)
@@ -330,7 +330,7 @@ define <4 x i16> @vld2dupi16_odd_update(i16** %ptr) nounwind {
define <2 x i32> @vld2dupi32(i8* %A) nounwind {
;CHECK-LABEL: vld2dupi32:
;Check the alignment value. Max for this instruction is 64 bits:
-;CHECK: vld2.32 {d16[], d17[]}, [r0:64]
+;CHECK: vld2.32 {d16[], d17[]}, [{{r[0-9]+|lr}}:64]
%tmp0 = tail call %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32.p0i8(i8* %A, <2 x i32> undef, <2 x i32> undef, i32 0, i32 16)
%tmp1 = extractvalue %struct.__neon_int2x32x2_t %tmp0, 0
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
@@ -350,7 +350,7 @@ declare %struct.__neon_int2x32x2_t @llvm.arm.neon.vld2lane.v2i32.p0i8(i8*, <2 x
;Check for a post-increment updating load with register increment.
define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
;CHECK-LABEL: vld3dupi8_update:
-;CHECK: vld3.8 {d16[], d17[], d18[]}, [r2], r1
+;CHECK: vld3.8 {d16[], d17[], d18[]}, [{{r[0-9]+|lr}}], r1
%A = load i8*, i8** %ptr
%tmp0 = tail call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8.p0i8(i8* %A, <8 x i8> undef, <8 x i8> undef, <8 x i8> undef, i32 0, i32 8)
%tmp1 = extractvalue %struct.__neon_int8x8x3_t %tmp0, 0
@@ -369,7 +369,7 @@ define <8 x i8> @vld3dupi8_update(i8** %ptr, i32 %inc) nounwind {
define <4 x i16> @vld3dupi16(i8* %A) nounwind {
;CHECK-LABEL: vld3dupi16:
;Check the (default) alignment value. VLD3 does not support alignment.
-;CHECK: vld3.16 {d16[], d17[], d18[]}, [r0]
+;CHECK: vld3.16 {d16[], d17[], d18[]}, [{{r[0-9]+|lr}}]
%tmp0 = tail call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16.p0i8(i8* %A, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 8)
%tmp1 = extractvalue %struct.__neon_int16x4x3_t %tmp0, 0
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> zeroinitializer
@@ -391,7 +391,7 @@ declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16.p0i8(i8*, <4 x
;Check for a post-increment updating load.
define <4 x i16> @vld4dupi16_update(i16** %ptr) nounwind {
;CHECK-LABEL: vld4dupi16_update:
-;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [r1]!
+;CHECK: vld4.16 {d16[], d17[], d18[], d19[]}, [{{r[0-9]+|lr}}]!
%A = load i16*, i16** %ptr
%A2 = bitcast i16* %A to i8*
%tmp0 = tail call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16.p0i8(i8* %A2, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, <4 x i16> undef, i32 0, i32 1)
@@ -415,7 +415,7 @@ define <2 x i32> @vld4dupi32(i8* %A) nounwind {
;CHECK-LABEL: vld4dupi32:
;Check the alignment value. An 8-byte alignment is allowed here even though
;it is smaller than the total size of the memory being loaded.
-;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [r0:64]
+;CHECK: vld4.32 {d16[], d17[], d18[], d19[]}, [{{r[0-9]+|lr}}:64]
%tmp0 = tail call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32.p0i8(i8* %A, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, <2 x i32> undef, i32 0, i32 8)
%tmp1 = extractvalue %struct.__neon_int32x2x4_t %tmp0, 0
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> zeroinitializer
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index 866641f3fbbd..f5c0f09ed440 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -308,7 +308,7 @@ define <8 x i16> @vld3laneQi16(i16* %A, <8 x i16>* %B) nounwind {
;Check for a post-increment updating load with register increment.
define <8 x i16> @vld3laneQi16_update(i16** %ptr, <8 x i16>* %B, i32 %inc) nounwind {
;CHECK-LABEL: vld3laneQi16_update:
-;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+}}], {{r[0-9]+}}
+;CHECK: vld3.16 {d{{.*}}[1], d{{.*}}[1], d{{.*}}[1]}, [{{r[0-9]+|lr}}], {{r[0-9]+}}
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>, <8 x i16>* %B
diff --git a/test/CodeGen/ARM/vpadd.ll b/test/CodeGen/ARM/vpadd.ll
index 3409d37a31f4..3fa93bb43f03 100644
--- a/test/CodeGen/ARM/vpadd.ll
+++ b/test/CodeGen/ARM/vpadd.ll
@@ -285,17 +285,17 @@ define void @addCombineToVPADDLq_s8(<16 x i8> *%cbcr, <8 x i16> *%X) nounwind ss
define void @addCombineToVPADDL_s8(<16 x i8> *%cbcr, <4 x i16> *%X) nounwind ssp {
; CHECK-LABEL: addCombineToVPADDL_s8:
; CHECK: @ BB#0:
-; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vmov.i16 d18, #0x8
-; CHECK-NEXT: vneg.s16 d18, d18
-; CHECK-NEXT: vext.8 d19, d16, d16, #1
-; CHECK-NEXT: vshl.i16 d16, d16, #8
-; CHECK-NEXT: vshl.i16 d17, d19, #8
-; CHECK-NEXT: vshl.s16 d16, d16, d18
-; CHECK-NEXT: vshl.s16 d17, d17, d18
-; CHECK-NEXT: vadd.i16 d16, d17, d16
-; CHECK-NEXT: vstr d16, [r1]
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: vmov.i16 d16, #0x8
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vext.8 d17, d18, d16, #1
+; CHECK-NEXT: vneg.s16 d16, d16
+; CHECK-NEXT: vshl.i16 d18, d18, #8
+; CHECK-NEXT: vshl.i16 d17, d17, #8
+; CHECK-NEXT: vshl.s16 d18, d18, d16
+; CHECK-NEXT: vshl.s16 d16, d17, d16
+; CHECK-NEXT: vadd.i16 d16, d16, d18
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
%tmp = load <16 x i8>, <16 x i8>* %cbcr
%tmp1 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
diff --git a/test/CodeGen/ARM/vst1.ll b/test/CodeGen/ARM/vst1.ll
index 404129a7e6ad..e351a2ec2373 100644
--- a/test/CodeGen/ARM/vst1.ll
+++ b/test/CodeGen/ARM/vst1.ll
@@ -39,7 +39,7 @@ define void @vst1f(float* %A, <2 x float>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst1f_update(float** %ptr, <2 x float>* %B) nounwind {
;CHECK-LABEL: vst1f_update:
-;CHECK: vst1.32 {d16}, [r1]!
+;CHECK: vst1.32 {d16}, [r{{[0-9]+}}]!
%A = load float*, float** %ptr
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <2 x float>, <2 x float>* %B
diff --git a/test/CodeGen/ARM/vst4.ll b/test/CodeGen/ARM/vst4.ll
index 188955102290..afa4321c91a0 100644
--- a/test/CodeGen/ARM/vst4.ll
+++ b/test/CodeGen/ARM/vst4.ll
@@ -12,7 +12,7 @@ define void @vst4i8(i8* %A, <8 x i8>* %B) nounwind {
;Check for a post-increment updating store with register increment.
define void @vst4i8_update(i8** %ptr, <8 x i8>* %B, i32 %inc) nounwind {
;CHECK-LABEL: vst4i8_update:
-;CHECK: vst4.8 {d16, d17, d18, d19}, [r1:128], r2
+;CHECK: vst4.8 {d16, d17, d18, d19}, [r{{[0-9]+}}:128], r2
%A = load i8*, i8** %ptr
%tmp1 = load <8 x i8>, <8 x i8>* %B
call void @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 16)
@@ -62,7 +62,7 @@ define void @vst4i64(i64* %A, <1 x i64>* %B) nounwind {
define void @vst4i64_update(i64** %ptr, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vst4i64_update:
-;CHECK: vst1.64 {d16, d17, d18, d19}, [r1]!
+;CHECK: vst1.64 {d16, d17, d18, d19}, [r{{[0-9]+}}]!
%A = load i64*, i64** %ptr
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>, <1 x i64>* %B
@@ -116,8 +116,8 @@ define void @vst4Qf(float* %A, <4 x float>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst4Qf_update(float** %ptr, <4 x float>* %B) nounwind {
;CHECK-LABEL: vst4Qf_update:
-;CHECK: vst4.32 {d16, d18, d20, d22}, [r1]!
-;CHECK: vst4.32 {d17, d19, d21, d23}, [r1]!
+ ;CHECK: vst4.32 {d16, d18, d20, d22}, [r[[REG:[0-9]+]]]!
+;CHECK: vst4.32 {d17, d19, d21, d23}, [r[[REG]]]!
%A = load float*, float** %ptr
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <4 x float>, <4 x float>* %B
diff --git a/test/CodeGen/ARM/vstlane.ll b/test/CodeGen/ARM/vstlane.ll
index 7e130ea01b64..49af0be92316 100644
--- a/test/CodeGen/ARM/vstlane.ll
+++ b/test/CodeGen/ARM/vstlane.ll
@@ -127,7 +127,7 @@ define void @vst2lanei16(i16* %A, <4 x i16>* %B) nounwind {
;Check for a post-increment updating store with register increment.
define void @vst2lanei16_update(i16** %ptr, <4 x i16>* %B, i32 %inc) nounwind {
;CHECK-LABEL: vst2lanei16_update:
-;CHECK: vst2.16 {d16[1], d17[1]}, [r1], r2
+;CHECK: vst2.16 {d16[1], d17[1]}, [r{{[0-9]+}}], r{{[0-9]+}}
%A = load i16*, i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>, <4 x i16>* %B
@@ -251,7 +251,7 @@ define void @vst3laneQi32(i32* %A, <4 x i32>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst3laneQi32_update(i32** %ptr, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vst3laneQi32_update:
-;CHECK: vst3.32 {d16[0], d18[0], d20[0]}, [r1]!
+;CHECK: vst3.32 {d16[0], d18[0], d20[0]}, [r{{[0-9]+}}]!
%A = load i32*, i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>, <4 x i32>* %B
@@ -292,7 +292,7 @@ define void @vst4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;Check for a post-increment updating store.
define void @vst4lanei8_update(i8** %ptr, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vst4lanei8_update:
-;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r1:32]!
+;CHECK: vst4.8 {d16[1], d17[1], d18[1], d19[1]}, [r{{[0-9]+}}:32]!
%A = load i8*, i8** %ptr
%tmp1 = load <8 x i8>, <8 x i8>* %B
call void @llvm.arm.neon.vst4lane.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index 0a5235df319f..24090cfd6c65 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -324,26 +324,23 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
; truncate from i32 to i16 and one vmovn.i16 to perform the final truncation for i8.
; CHECK-LABEL: cmpsel_trunc:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r4, r5, r11, lr}
-; CHECK-NEXT: push {r4, r5, r11, lr}
-; CHECK-NEXT: add r4, sp, #64
-; CHECK-NEXT: add r5, sp, #32
-; CHECK-NEXT: add r12, sp, #48
-; CHECK-NEXT: add lr, sp, #16
-; CHECK-NEXT: vld1.64 {d16, d17}, [r5]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r4]
-; CHECK-NEXT: vld1.64 {d20, d21}, [lr]
-; CHECK-NEXT: vld1.64 {d22, d23}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vcgt.u32 q9, q11, q10
-; CHECK-NEXT: vmovn.i32 d17, q8
-; CHECK-NEXT: vmovn.i32 d16, q9
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vmovn.i16 d16, q8
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r4, r5, r11, lr}
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: add r12, sp, #48
+; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
+; CHECK-NEXT: add r12, sp, #32
+; CHECK-NEXT: vcgt.u32 q8, q10, q8
+; CHECK-NEXT: vld1.64 {d20, d21}, [r12]
+; CHECK-NEXT: vcgt.u32 q9, q10, q9
+; CHECK-NEXT: vmov d20, r2, r3
+; CHECK-NEXT: vmovn.i32 d17, q8
+; CHECK-NEXT: vmovn.i32 d16, q9
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vmovn.i16 d16, q8
+; CHECK-NEXT: vbsl d16, d18, d20
+; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%c = icmp ult <8 x i32> %cmp0, %cmp1
%res = select <8 x i1> %c, <8 x i8> %in0, <8 x i8> %in1
@@ -356,28 +353,28 @@ define <8 x i8> @cmpsel_trunc(<8 x i8> %in0, <8 x i8> %in1, <8 x i32> %cmp0, <8
define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r4, lr}
-; CHECK-NEXT: push {r4, lr}
-; CHECK-NEXT: ldr r12, [sp, #40]
-; CHECK-NEXT: add lr, sp, #24
-; CHECK-NEXT: add r4, sp, #8
-; CHECK-NEXT: vld1.64 {d16, d17}, [r4]
-; CHECK-NEXT: vld1.64 {d18, d19}, [lr]
-; CHECK-NEXT: vld1.32 {d20[0]}, [r12:32]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: vmovl.u8 q9, d20
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r4, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: add r12, sp, #8
+; CHECK-NEXT: add lr, sp, #24
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: ldr r12, [sp, #40]
+; CHECK-NEXT: vld1.64 {d18, d19}, [lr]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vld1.32 {d18[0]}, [r12:32]
+; CHECK-NEXT: vmov.i8 d19, #0x7
+; CHECK-NEXT: vmovl.u8 q10, d18
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vneg.s8 d17, d19
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vuzp.8 d16, d20
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: pop {r11, lr}
+; CHECK-NEXT: mov pc, lr
<4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
%cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
%cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
@@ -392,25 +389,22 @@ define <8 x i8> @vuzp_trunc_and_shuffle(<8 x i8> %tr0, <8 x i8> %tr1,
define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_right:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push {r11, lr}
-; CHECK-NEXT: add r12, sp, #24
-; CHECK-NEXT: add lr, sp, #8
-; CHECK-NEXT: vld1.64 {d16, d17}, [lr]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vmov.i8 d18, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vuzp.8 d16, d17
+; CHECK-NEXT: vneg.s8 d17, d18
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
<4 x i32> %cmp0, <4 x i32> %cmp1, <4 x i8> *%cmp2_ptr) {
%cmp2_load = load <4 x i8>, <4 x i8> * %cmp2_ptr, align 4
%cmp2 = trunc <4 x i8> %cmp2_load to <4 x i1>
@@ -423,26 +417,23 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_right(<8 x i8> %tr0, <8 x i8> %tr1
define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
; CHECK-LABEL: vuzp_trunc_and_shuffle_undef_left:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r11, lr}
-; CHECK-NEXT: push {r11, lr}
-; CHECK-NEXT: add r12, sp, #24
-; CHECK-NEXT: add lr, sp, #8
-; CHECK-NEXT: vldr d20, .LCPI22_0
-; CHECK-NEXT: vld1.64 {d16, d17}, [lr]
-; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmov d18, r2, r3
-; CHECK-NEXT: vmov d19, r0, r1
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vtbl.8 d16, {d16}, d20
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vshl.s8 d16, d16, d17
-; CHECK-NEXT: vbsl d16, d19, d18
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: pop {r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: mov r12, sp
+; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
+; CHECK-NEXT: add r12, sp, #16
+; CHECK-NEXT: vld1.64 {d18, d19}, [r12]
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vldr d18, .LCPI22_0
+; CHECK-NEXT: vmov.i8 d19, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vtbl.8 d16, {d16}, d18
+; CHECK-NEXT: vneg.s8 d17, d19
+; CHECK-NEXT: vmov d18, r2, r3
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vshl.s8 d16, d16, d17
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vbsl d16, d17, d18
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ BB#1:
; CHECK-NEXT: .LCPI22_0:
@@ -468,65 +459,63 @@ define <8 x i8> @vuzp_trunc_and_shuffle_undef_left(<8 x i8> %tr0, <8 x i8> %tr1,
define <10 x i8> @vuzp_wide_type(<10 x i8> %tr0, <10 x i8> %tr1,
; CHECK-LABEL: vuzp_wide_type:
; CHECK: @ BB#0:
-; CHECK-NEXT: .save {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT: push {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT: .setfp r11, sp, #16
-; CHECK-NEXT: add r11, sp, #16
-; CHECK-NEXT: .pad #8
-; CHECK-NEXT: sub sp, sp, #8
-; CHECK-NEXT: bic sp, sp, #15
-; CHECK-NEXT: add r5, r11, #52
-; CHECK-NEXT: add r7, r11, #32
-; CHECK-NEXT: add r4, r11, #44
-; CHECK-NEXT: add r6, r11, #24
-; CHECK-NEXT: add r12, r11, #60
-; CHECK-NEXT: add lr, r11, #40
-; CHECK-NEXT: vld1.32 {d17[0]}, [r7:32]
-; CHECK-NEXT: vld1.32 {d19[0]}, [r5:32]
-; CHECK-NEXT: vld1.32 {d22[0]}, [r12:32]
-; CHECK-NEXT: ldr r12, [r11, #64]
-; CHECK-NEXT: vld1.32 {d20[0]}, [lr:32]
-; CHECK-NEXT: add r7, r11, #48
-; CHECK-NEXT: add r5, r11, #28
-; CHECK-NEXT: vld1.32 {d16[0]}, [r6:32]
-; CHECK-NEXT: vld1.32 {d18[0]}, [r4:32]
-; CHECK-NEXT: add r6, r11, #56
-; CHECK-NEXT: add r4, r11, #36
-; CHECK-NEXT: vcgt.u32 q10, q11, q10
-; CHECK-NEXT: vld1.32 {d19[1]}, [r6:32]
-; CHECK-NEXT: vld1.32 {d17[1]}, [r4:32]
-; CHECK-NEXT: add r6, r12, #4
-; CHECK-NEXT: vld1.32 {d18[1]}, [r7:32]
-; CHECK-NEXT: vld1.32 {d16[1]}, [r5:32]
-; CHECK-NEXT: ldr r7, [r12]
-; CHECK-NEXT: vcgt.u32 q8, q9, q8
-; CHECK-NEXT: vmovn.i32 d18, q10
-; CHECK-NEXT: vmov.32 d21[0], r7
-; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmov.u8 r7, d21[3]
-; CHECK-NEXT: vmov.i8 d17, #0x7
-; CHECK-NEXT: vuzp.8 d16, d18
-; CHECK-NEXT: vmov.8 d23[0], r7
-; CHECK-NEXT: vneg.s8 d17, d17
-; CHECK-NEXT: add r7, r11, #8
-; CHECK-NEXT: vldr d18, .LCPI23_0
-; CHECK-NEXT: vld1.8 {d23[1]}, [r6]
-; CHECK-NEXT: vshl.i8 d16, d16, #7
-; CHECK-NEXT: vshl.s8 d20, d16, d17
-; CHECK-NEXT: vmov.i8 q8, #0x7
-; CHECK-NEXT: vneg.s8 q8, q8
-; CHECK-NEXT: vtbl.8 d22, {d20, d21}, d18
-; CHECK-NEXT: vld1.64 {d18, d19}, [r7]
-; CHECK-NEXT: vshl.i8 q10, q11, #7
-; CHECK-NEXT: vmov d23, r2, r3
-; CHECK-NEXT: vmov d22, r0, r1
-; CHECK-NEXT: vshl.s8 q8, q10, q8
-; CHECK-NEXT: vbsl q8, q11, q9
-; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d17
-; CHECK-NEXT: sub sp, r11, #16
-; CHECK-NEXT: pop {r4, r5, r6, r7, r11, lr}
-; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .save {r4, r10, r11, lr}
+; CHECK-NEXT: push {r4, r10, r11, lr}
+; CHECK-NEXT: .setfp r11, sp, #8
+; CHECK-NEXT: add r11, sp, #8
+; CHECK-NEXT: bic sp, sp, #15
+; CHECK-NEXT: add r12, r11, #32
+; CHECK-NEXT: add lr, r11, #60
+; CHECK-NEXT: vld1.32 {d17[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #24
+; CHECK-NEXT: vld1.32 {d22[0]}, [lr:32]
+; CHECK-NEXT: add lr, r11, #36
+; CHECK-NEXT: vld1.32 {d16[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #52
+; CHECK-NEXT: vld1.32 {d19[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #44
+; CHECK-NEXT: vld1.32 {d17[1]}, [lr:32]
+; CHECK-NEXT: vld1.32 {d18[0]}, [r12:32]
+; CHECK-NEXT: add r12, r11, #40
+; CHECK-NEXT: vld1.32 {d20[0]}, [r12:32]
+; CHECK-NEXT: ldr r12, [r11, #64]
+; CHECK-NEXT: vcgt.u32 q10, q11, q10
+; CHECK-NEXT: ldr r4, [r12]
+; CHECK-NEXT: vmov.32 d25[0], r4
+; CHECK-NEXT: add r4, r11, #28
+; CHECK-NEXT: vld1.32 {d16[1]}, [r4:32]
+; CHECK-NEXT: add r4, r11, #56
+; CHECK-NEXT: vld1.32 {d19[1]}, [r4:32]
+; CHECK-NEXT: add r4, r11, #48
+; CHECK-NEXT: vmov.u8 lr, d25[3]
+; CHECK-NEXT: vld1.32 {d18[1]}, [r4:32]
+; CHECK-NEXT: add r4, r12, #4
+; CHECK-NEXT: vcgt.u32 q8, q9, q8
+; CHECK-NEXT: vmovn.i32 d19, q10
+; CHECK-NEXT: vldr d20, .LCPI23_0
+; CHECK-NEXT: vmov.i8 d18, #0x7
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vneg.s8 d17, d18
+; CHECK-NEXT: vuzp.8 d16, d19
+; CHECK-NEXT: vmov.i8 q9, #0x7
+; CHECK-NEXT: vshl.i8 d16, d16, #7
+; CHECK-NEXT: vneg.s8 q9, q9
+; CHECK-NEXT: vshl.s8 d24, d16, d17
+; CHECK-NEXT: vmov.8 d17[0], lr
+; CHECK-NEXT: vtbl.8 d16, {d24, d25}, d20
+; CHECK-NEXT: vld1.8 {d17[1]}, [r4]
+; CHECK-NEXT: add r4, r11, #8
+; CHECK-NEXT: vshl.i8 q8, q8, #7
+; CHECK-NEXT: vld1.64 {d20, d21}, [r4]
+; CHECK-NEXT: vshl.s8 q8, q8, q9
+; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vmov d18, r0, r1
+; CHECK-NEXT: vbsl q8, q9, q10
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: sub sp, r11, #8
+; CHECK-NEXT: pop {r4, r10, r11, lr}
+; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 3
; CHECK-NEXT: @ BB#1:
; CHECK-NEXT: .LCPI23_0:
diff --git a/test/CodeGen/BPF/remove_truncate_1.ll b/test/CodeGen/BPF/remove_truncate_1.ll
new file mode 100644
index 000000000000..65433853b9d5
--- /dev/null
+++ b/test/CodeGen/BPF/remove_truncate_1.ll
@@ -0,0 +1,87 @@
+; RUN: llc < %s -march=bpf -verify-machineinstrs | FileCheck %s
+
+; Source code:
+; struct xdp_md {
+; unsigned data;
+; unsigned data_end;
+; };
+;
+; int gbl;
+; int xdp_dummy(struct xdp_md *xdp)
+; {
+; char tmp;
+; long addr;
+;
+; if (gbl) {
+; long addr1 = (long)xdp->data;
+; tmp = *(char *)addr1;
+; if (tmp == 1)
+; return 3;
+; } else {
+; tmp = *(volatile char *)(long)xdp->data_end;
+; if (tmp == 1)
+; return 2;
+; }
+; addr = (long)xdp->data;
+; tmp = *(volatile char *)addr;
+; if (tmp == 0)
+; return 1;
+; return 0;
+; }
+
+%struct.xdp_md = type { i32, i32 }
+
+@gbl = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind
+define i32 @xdp_dummy(%struct.xdp_md* nocapture readonly %xdp) local_unnamed_addr #0 {
+entry:
+ %0 = load i32, i32* @gbl, align 4
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then: ; preds = %entry
+ %data = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 0
+ %1 = load i32, i32* %data, align 4
+ %conv = zext i32 %1 to i64
+ %2 = inttoptr i64 %conv to i8*
+ %3 = load i8, i8* %2, align 1
+ %cmp = icmp eq i8 %3, 1
+ br i1 %cmp, label %cleanup20, label %if.end12
+; CHECK: r1 = *(u32 *)(r1 + 0)
+; CHECK: r2 = *(u8 *)(r1 + 0)
+
+if.else: ; preds = %entry
+ %data_end = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 1
+ %4 = load i32, i32* %data_end, align 4
+ %conv6 = zext i32 %4 to i64
+; CHECK: r2 = *(u32 *)(r1 + 4)
+ %5 = inttoptr i64 %conv6 to i8*
+ %6 = load volatile i8, i8* %5, align 1
+ %cmp8 = icmp eq i8 %6, 1
+ br i1 %cmp8, label %cleanup20, label %if.else.if.end12_crit_edge
+
+if.else.if.end12_crit_edge: ; preds = %if.else
+ %data13.phi.trans.insert = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 0
+ %.pre = load i32, i32* %data13.phi.trans.insert, align 4
+ br label %if.end12
+; CHECK: r1 = *(u32 *)(r1 + 0)
+
+if.end12: ; preds = %if.else.if.end12_crit_edge, %if.then
+ %7 = phi i32 [ %.pre, %if.else.if.end12_crit_edge ], [ %1, %if.then ]
+ %conv14 = zext i32 %7 to i64
+; CHECK-NOT: r1 <<= 32
+; CHECK-NOT: r1 >>= 32
+ %8 = inttoptr i64 %conv14 to i8*
+ %9 = load volatile i8, i8* %8, align 1
+; CHECK: r1 = *(u8 *)(r1 + 0)
+ %cmp16 = icmp eq i8 %9, 0
+ %.28 = zext i1 %cmp16 to i32
+ br label %cleanup20
+
+cleanup20: ; preds = %if.then, %if.end12, %if.else
+ %retval.1 = phi i32 [ 3, %if.then ], [ 2, %if.else ], [ %.28, %if.end12 ]
+ ret i32 %retval.1
+}
+
+attributes #0 = { norecurse nounwind }
diff --git a/test/CodeGen/BPF/remove_truncate_2.ll b/test/CodeGen/BPF/remove_truncate_2.ll
new file mode 100644
index 000000000000..979d820dd857
--- /dev/null
+++ b/test/CodeGen/BPF/remove_truncate_2.ll
@@ -0,0 +1,65 @@
+; RUN: llc < %s -march=bpf -verify-machineinstrs | FileCheck %s
+
+; Source code:
+; struct xdp_md {
+; unsigned data;
+; unsigned data_end;
+; };
+;
+; int gbl;
+; int xdp_dummy(struct xdp_md *xdp)
+; {
+; char addr = *(char *)(long)xdp->data;
+; if (gbl) {
+; if (gbl == 1)
+; return 1;
+; if (addr == 1)
+; return 3;
+; } else if (addr == 0)
+; return 2;
+; return 0;
+; }
+
+%struct.xdp_md = type { i32, i32 }
+
+@gbl = common local_unnamed_addr global i32 0, align 4
+
+; Function Attrs: norecurse nounwind readonly
+define i32 @xdp_dummy(%struct.xdp_md* nocapture readonly %xdp) local_unnamed_addr #0 {
+entry:
+ %data = getelementptr inbounds %struct.xdp_md, %struct.xdp_md* %xdp, i64 0, i32 0
+ %0 = load i32, i32* %data, align 4
+ %conv = zext i32 %0 to i64
+ %1 = inttoptr i64 %conv to i8*
+ %2 = load i8, i8* %1, align 1
+; CHECK: r1 = *(u32 *)(r1 + 0)
+; CHECK: r1 = *(u8 *)(r1 + 0)
+ %3 = load i32, i32* @gbl, align 4
+ switch i32 %3, label %if.end [
+ i32 0, label %if.else
+ i32 1, label %cleanup
+ ]
+
+if.end: ; preds = %entry
+ %cmp4 = icmp eq i8 %2, 1
+; CHECK: r0 = 3
+; CHECK-NOT: r1 &= 255
+; CHECK: if r1 == 1 goto
+ br i1 %cmp4, label %cleanup, label %if.end13
+
+if.else: ; preds = %entry
+ %cmp9 = icmp eq i8 %2, 0
+; CHECK: r0 = 2
+; CHECK-NOT: r1 &= 255
+; CHECK: if r1 == 0 goto
+ br i1 %cmp9, label %cleanup, label %if.end13
+
+if.end13: ; preds = %if.else, %if.end
+ br label %cleanup
+
+cleanup: ; preds = %if.else, %if.end, %entry, %if.end13
+ %retval.0 = phi i32 [ 0, %if.end13 ], [ 1, %entry ], [ 3, %if.end ], [ 2, %if.else ]
+ ret i32 %retval.0
+}
+
+attributes #0 = { norecurse nounwind readonly }
diff --git a/test/CodeGen/Hexagon/addrmode-keepdeadphis.mir b/test/CodeGen/Hexagon/addrmode-keepdeadphis.mir
new file mode 100644
index 000000000000..b77a7b1bd365
--- /dev/null
+++ b/test/CodeGen/Hexagon/addrmode-keepdeadphis.mir
@@ -0,0 +1,30 @@
+# RUN: llc -march=hexagon -run-pass amode-opt %s -o - | FileCheck %s
+
+# Check that the addasl is not propagated into the addressing mode.
+# CHECK-NOT: L4_loadri_ur
+
+--- |
+ @g = global i32 zeroinitializer
+ define void @fred() { ret void }
+...
+
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: %p0
+ %r0 = A2_tfrsi @g
+ %r1 = A2_tfrsi 1
+ %r2 = S2_addasl_rrri %r0, %r1, 1
+ J2_jumpt %p0, %bb.2, implicit-def %pc
+
+ bb.1:
+ liveins: %r0, %r2
+ %r1 = A2_tfrsi 2
+
+ bb.2:
+ liveins: %r0, %r2
+ %r3 = L2_loadri_io %r2, 0
+...
diff --git a/test/CodeGen/Hexagon/expand-condsets-undefvni.ll b/test/CodeGen/Hexagon/expand-condsets-undefvni.ll
new file mode 100644
index 000000000000..45ba5131e668
--- /dev/null
+++ b/test/CodeGen/Hexagon/expand-condsets-undefvni.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that this compiles successfully.
+; CHECK: jumpr r31
+
+target triple = "hexagon"
+
+define i64 @fred(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b2:
+ %v3 = lshr i64 %a1, 52
+ %v4 = trunc i64 %v3 to i11
+ switch i11 %v4, label %b15 [
+ i11 -1, label %b5
+ i11 0, label %b14
+ ]
+
+b5: ; preds = %b2
+ br i1 undef, label %b13, label %b6
+
+b6: ; preds = %b5
+ %v7 = or i64 %a1, 2251799813685248
+ br i1 undef, label %b8, label %b10
+
+b8: ; preds = %b6
+ %v9 = select i1 undef, i64 %v7, i64 undef
+ br label %b16
+
+b10: ; preds = %b6
+ br i1 undef, label %b16, label %b11
+
+b11: ; preds = %b10
+ %v12 = select i1 undef, i64 undef, i64 %v7
+ br label %b16
+
+b13: ; preds = %b5
+ br label %b16
+
+b14: ; preds = %b2
+ br label %b16
+
+b15: ; preds = %b2
+ br label %b16
+
+b16: ; preds = %b15, %b14, %b13, %b11, %b10, %b8
+ %v17 = phi i64 [ undef, %b13 ], [ -2251799813685248, %b14 ], [ 0, %b15 ], [ %v12, %b11 ], [ %v9, %b8 ], [ %v7, %b10 ]
+ ret i64 %v17
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv62" }
diff --git a/test/CodeGen/Hexagon/expand-vselect-kill.ll b/test/CodeGen/Hexagon/expand-vselect-kill.ll
new file mode 100644
index 000000000000..1d07859665c0
--- /dev/null
+++ b/test/CodeGen/Hexagon/expand-vselect-kill.ll
@@ -0,0 +1,53 @@
+; RUN: llc -march=hexagon -verify-machineinstrs < %s | FileCheck %s
+;
+; Check that this does not crash.
+
+target triple = "hexagon"
+
+; CHECK-LABEL: danny:
+; CHECK-DAG: if ([[PREG:p[0-3]]]) [[VREG:v[0-9]+]]
+; CHECK-DAG: if (![[PREG]]) [[VREG]]
+define void @danny() local_unnamed_addr #0 {
+b0:
+ %v1 = icmp eq i32 0, undef
+ %v2 = select i1 %v1, <16 x i32> zeroinitializer, <16 x i32> undef
+ %v3 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v2, <16 x i32> zeroinitializer, i32 2)
+ %v4 = tail call <32 x i32> @llvm.hexagon.V6.vswap(<512 x i1> undef, <16 x i32> undef, <16 x i32> %v3)
+ %v5 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v4)
+ %v6 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> undef, <16 x i32> %v5, i32 62)
+ %v7 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v6)
+ store <16 x i32> %v7, <16 x i32>* undef, align 64
+ unreachable
+}
+
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #2
+declare <32 x i32> @llvm.hexagon.V6.vswap(<512 x i1>, <16 x i32>, <16 x i32>) #2
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #2
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #2
+declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #2
+
+; CHECK-LABEL: sammy:
+; CHECK-DAG: if ([[PREG:p[0-3]]]) [[VREG:v[0-9]+]]
+; CHECK-DAG: if (![[PREG]]) [[VREG]]
+define void @sammy() local_unnamed_addr #1 {
+b0:
+ %v1 = icmp eq i32 0, undef
+ %v2 = select i1 %v1, <32 x i32> zeroinitializer, <32 x i32> undef
+ %v3 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> %v2, <32 x i32> zeroinitializer, i32 2)
+ %v4 = tail call <64 x i32> @llvm.hexagon.V6.vswap.128B(<1024 x i1> undef, <32 x i32> undef, <32 x i32> %v3)
+ %v5 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v4)
+ %v6 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> undef, <32 x i32> %v5, i32 62)
+ %v7 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v6)
+ store <32 x i32> %v7, <32 x i32>* undef, align 128
+ unreachable
+}
+
+declare <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32>, <32 x i32>, i32) #2
+declare <64 x i32> @llvm.hexagon.V6.vswap.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #2
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #2
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #2
+declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #2
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx" }
+attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx-double" }
+attributes #2 = { nounwind readnone }
diff --git a/test/CodeGen/Hexagon/fpelim-basic.ll b/test/CodeGen/Hexagon/fpelim-basic.ll
new file mode 100644
index 000000000000..ffec07f7dbfe
--- /dev/null
+++ b/test/CodeGen/Hexagon/fpelim-basic.ll
@@ -0,0 +1,91 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; FP elimination enabled.
+;
+; CHECK-LABEL: danny:
+; CHECK: r29 = add(r29,#-[[SIZE:[0-9]+]])
+; CHECK: r29 = add(r29,#[[SIZE]])
+define i32 @danny(i32 %a0, i32 %a1) local_unnamed_addr #0 {
+b2:
+ %v3 = alloca [32 x i32], align 8
+ %v4 = bitcast [32 x i32]* %v3 to i8*
+ call void @llvm.lifetime.start.p0i8(i64 128, i8* nonnull %v4) #3
+ br label %b5
+
+b5: ; preds = %b5, %b2
+ %v6 = phi i32 [ 0, %b2 ], [ %v8, %b5 ]
+ %v7 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v6
+ store i32 %v6, i32* %v7, align 4
+ %v8 = add nuw nsw i32 %v6, 1
+ %v9 = icmp eq i32 %v8, 32
+ br i1 %v9, label %b10, label %b5
+
+b10: ; preds = %b5
+ %v11 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %a0
+ store i32 %a1, i32* %v11, align 4
+ br label %b12
+
+b12: ; preds = %b12, %b10
+ %v13 = phi i32 [ 0, %b10 ], [ %v18, %b12 ]
+ %v14 = phi i32 [ 0, %b10 ], [ %v17, %b12 ]
+ %v15 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v13
+ %v16 = load i32, i32* %v15, align 4
+ %v17 = add nsw i32 %v16, %v14
+ %v18 = add nuw nsw i32 %v13, 1
+ %v19 = icmp eq i32 %v18, 32
+ br i1 %v19, label %b20, label %b12
+
+b20: ; preds = %b12
+ call void @llvm.lifetime.end.p0i8(i64 128, i8* nonnull %v4) #3
+ ret i32 %v17
+}
+
+; FP elimination disabled.
+;
+; CHECK-LABEL: sammy:
+; CHECK: allocframe
+; CHECK: dealloc_return
+define i32 @sammy(i32 %a0, i32 %a1) local_unnamed_addr #1 {
+b2:
+ %v3 = alloca [32 x i32], align 8
+ %v4 = bitcast [32 x i32]* %v3 to i8*
+ call void @llvm.lifetime.start.p0i8(i64 128, i8* nonnull %v4) #3
+ br label %b5
+
+b5: ; preds = %b5, %b2
+ %v6 = phi i32 [ 0, %b2 ], [ %v8, %b5 ]
+ %v7 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v6
+ store i32 %v6, i32* %v7, align 4
+ %v8 = add nuw nsw i32 %v6, 1
+ %v9 = icmp eq i32 %v8, 32
+ br i1 %v9, label %b10, label %b5
+
+b10: ; preds = %b5
+ %v11 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %a0
+ store i32 %a1, i32* %v11, align 4
+ br label %b12
+
+b12: ; preds = %b12, %b10
+ %v13 = phi i32 [ 0, %b10 ], [ %v18, %b12 ]
+ %v14 = phi i32 [ 0, %b10 ], [ %v17, %b12 ]
+ %v15 = getelementptr inbounds [32 x i32], [32 x i32]* %v3, i32 0, i32 %v13
+ %v16 = load i32, i32* %v15, align 4
+ %v17 = add nsw i32 %v16, %v14
+ %v18 = add nuw nsw i32 %v13, 1
+ %v19 = icmp eq i32 %v18, 32
+ br i1 %v19, label %b20, label %b12
+
+b20: ; preds = %b12
+ call void @llvm.lifetime.end.p0i8(i64 128, i8* nonnull %v4) #3
+ ret i32 %v17
+}
+
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+
+attributes #0 = { nounwind readnone "no-frame-pointer-elim"="false" "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone "no-frame-pointer-elim"="true" "target-cpu"="hexagonv60" }
+attributes #2 = { argmemonly nounwind }
+attributes #3 = { nounwind }
diff --git a/test/CodeGen/Hexagon/frame.ll b/test/CodeGen/Hexagon/frame.ll
deleted file mode 100644
index e87acb8cd796..000000000000
--- a/test/CodeGen/Hexagon/frame.ll
+++ /dev/null
@@ -1,23 +0,0 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv4 < %s | FileCheck %s
-
-@num = external global i32
-@acc = external global i32
-@num2 = external global i32
-
-; CHECK: allocframe
-; CHECK: dealloc_return
-
-define i32 @foo() nounwind {
-entry:
- %i = alloca i32, align 4
- %0 = load i32, i32* @num, align 4
- store i32 %0, i32* %i, align 4
- %1 = load i32, i32* %i, align 4
- %2 = load i32, i32* @acc, align 4
- %mul = mul nsw i32 %1, %2
- %3 = load i32, i32* @num2, align 4
- %add = add nsw i32 %mul, %3
- store i32 %add, i32* %i, align 4
- %4 = load i32, i32* %i, align 4
- ret i32 %4
-}
diff --git a/test/CodeGen/Hexagon/jt-in-text.ll b/test/CodeGen/Hexagon/jt-in-text.ll
new file mode 100644
index 000000000000..62b5caef6aaa
--- /dev/null
+++ b/test/CodeGen/Hexagon/jt-in-text.ll
@@ -0,0 +1,57 @@
+; RUN: llc -hexagon-emit-jt-text=true < %s | FileCheck %s
+target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
+target triple = "hexagon-unknown--elf"
+
+; CHECK: .text
+; CHECK-NOT: .rodata
+; CHECK: .word
+
+@lane0_pwr_st = global i32 0, align 4
+@lane1_pwr_st = global i32 0, align 4
+@lane2_pwr_st = global i32 0, align 4
+@lane3_pwr_st = global i32 0, align 4
+
+; Function Attrs: noinline nounwind
+define void @test2(i32 %lane_id, i32 %rx_pwr_st) #0 {
+entry:
+ %lane_id.addr = alloca i32, align 4
+ %rx_pwr_st.addr = alloca i32, align 4
+ store i32 %lane_id, i32* %lane_id.addr, align 4
+ store i32 %rx_pwr_st, i32* %rx_pwr_st.addr, align 4
+ %0 = load i32, i32* %lane_id.addr, align 4
+ switch i32 %0, label %sw.epilog [
+ i32 0, label %sw.bb
+ i32 1, label %sw.bb1
+ i32 2, label %sw.bb2
+ i32 3, label %sw.bb3
+ i32 15, label %sw.bb4
+ ]
+
+sw.bb: ; preds = %entry
+ store i32 1, i32* @lane0_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb1: ; preds = %entry
+ store i32 1, i32* @lane1_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb2: ; preds = %entry
+ store i32 1, i32* @lane2_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb3: ; preds = %entry
+ store i32 1, i32* @lane3_pwr_st, align 4
+ br label %sw.epilog
+
+sw.bb4: ; preds = %entry
+ store i32 1, i32* @lane0_pwr_st, align 4
+ store i32 1, i32* @lane1_pwr_st, align 4
+ store i32 1, i32* @lane2_pwr_st, align 4
+ store i32 1, i32* @lane3_pwr_st, align 4
+ br label %sw.epilog
+
+sw.epilog: ; preds = %entry, %sw.bb4, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb
+ ret void
+}
+
+attributes #0 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv60" "target-features"="-hvx-double,-long-calls" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/test/CodeGen/Hexagon/newvaluejump-kill2.mir b/test/CodeGen/Hexagon/newvaluejump-kill2.mir
new file mode 100644
index 000000000000..565d07dc87ee
--- /dev/null
+++ b/test/CodeGen/Hexagon/newvaluejump-kill2.mir
@@ -0,0 +1,18 @@
+# RUN: llc -march=hexagon -run-pass hexagon-nvj -verify-machineinstrs %s -o - | FileCheck %s
+# CHECK: J4_cmpgtu_t_jumpnv_t killed %r3, killed %r1, %bb.1, implicit-def %pc
+
+---
+name: fred
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: %r0
+ %r1 = A2_addi %r0, -1
+ %r2 = A2_tfrsi -1431655765
+ %r3 = A2_tfrsi 2
+ %p0 = C2_cmpgtu killed %r3, %r1
+ %r2 = S4_subaddi killed %r1, 1, killed %r2
+ J2_jumpt killed %p0, %bb.1, implicit-def %pc
+ bb.1:
+...
diff --git a/test/CodeGen/Hexagon/newvaluejump2.ll b/test/CodeGen/Hexagon/newvaluejump2.ll
index 4c897f0830f3..fbc3f2925d19 100644
--- a/test/CodeGen/Hexagon/newvaluejump2.ll
+++ b/test/CodeGen/Hexagon/newvaluejump2.ll
@@ -6,7 +6,7 @@
@Reg = common global i32 0, align 4
define i32 @main() nounwind {
entry:
-; CHECK: if (cmp.gt(r{{[0-9]+}},r{{[0-9]+}}.new)) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
+; CHECK: if (cmp.gt(r{{[0-9]+}}.new,r{{[0-9]+}})) jump:{{[t|nt]}} .LBB{{[0-9]+}}_{{[0-9]+}}
%Reg2 = alloca i32, align 4
%0 = load i32, i32* %Reg2, align 4
%1 = load i32, i32* @Reg, align 4
diff --git a/test/CodeGen/Hexagon/regalloc-liveout-undef.mir b/test/CodeGen/Hexagon/regalloc-liveout-undef.mir
new file mode 100644
index 000000000000..6a41514b060e
--- /dev/null
+++ b/test/CodeGen/Hexagon/regalloc-liveout-undef.mir
@@ -0,0 +1,35 @@
+# RUN: llc -march=hexagon -run-pass liveintervals -run-pass machineverifier -run-pass simple-register-coalescing %s -o - | FileCheck %s
+#
+# If there is no consumer of the live intervals, the live intervals pass
+# will be freed immediately after it runs, before the verifier. Add a
+# user (register coalescer in this case), so that the verification will
+# cover live intervals as well.
+#
+# Make sure that this compiles successfully.
+# CHECK: undef %1.isub_lo = A2_addi %1.isub_lo, 1
+
+---
+name: fred
+tracksRegLiveness: true
+
+registers:
+ - { id: 0, class: intregs }
+ - { id: 1, class: doubleregs }
+ - { id: 2, class: predregs }
+ - { id: 3, class: doubleregs }
+body: |
+ bb.0:
+ liveins: %d0
+ successors: %bb.1
+ %0 = IMPLICIT_DEF
+ %1 = COPY %d0
+
+ bb.1:
+ successors: %bb.1
+ %2 = C2_cmpgt %0, %1.isub_lo
+ %3 = COPY %1
+ %1 = COPY %3
+ undef %1.isub_lo = A2_addi %1.isub_lo, 1
+ J2_jump %bb.1, implicit-def %pc
+...
+
diff --git a/test/CodeGen/MIR/Generic/multiRunPass.mir b/test/CodeGen/MIR/Generic/multiRunPass.mir
index bd1c0d0b458e..e055c44205b5 100644
--- a/test/CodeGen/MIR/Generic/multiRunPass.mir
+++ b/test/CodeGen/MIR/Generic/multiRunPass.mir
@@ -7,7 +7,8 @@
# This test ensures that the command line accepts
# several run passes on the same command line and
# actually create the proper pipeline for it.
-# PSEUDO_PEEPHOLE: -expand-isel-pseudos {{(-machineverifier )?}}-peephole-opt
+# PSEUDO_PEEPHOLE: -expand-isel-pseudos
+# PSEUDO_PEEPHOLE-SAME: {{(-machineverifier )?}}-peephole-opt
# PEEPHOLE_PSEUDO: -peephole-opt {{(-machineverifier )?}}-expand-isel-pseudos
# Make sure there are no other passes happening after what we asked.
diff --git a/test/CodeGen/Mips/2008-06-05-Carry.ll b/test/CodeGen/Mips/2008-06-05-Carry.ll
index 5e6092fc7848..c61e1cdedea7 100644
--- a/test/CodeGen/Mips/2008-06-05-Carry.ll
+++ b/test/CodeGen/Mips/2008-06-05-Carry.ll
@@ -2,21 +2,20 @@
define i64 @add64(i64 %u, i64 %v) nounwind {
entry:
-; CHECK-LABEL: add64:
; CHECK: addu
-; CHECK-DAG: sltu
-; CHECK-DAG: addu
+; CHECK: sltu
; CHECK: addu
- %tmp2 = add i64 %u, %v
+; CHECK: addu
+ %tmp2 = add i64 %u, %v
ret i64 %tmp2
}
define i64 @sub64(i64 %u, i64 %v) nounwind {
entry:
-; CHECK-LABEL: sub64
-; CHECK-DAG: sltu
-; CHECK-DAG: subu
+; CHECK: sub64
; CHECK: subu
+; CHECK: sltu
+; CHECK: addu
; CHECK: subu
%tmp2 = sub i64 %u, %v
ret i64 %tmp2
diff --git a/test/CodeGen/Mips/dsp-patterns.ll b/test/CodeGen/Mips/dsp-patterns.ll
index 250d3eff37dc..837c0d8bfc52 100644
--- a/test/CodeGen/Mips/dsp-patterns.ll
+++ b/test/CodeGen/Mips/dsp-patterns.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dsp < %s | FileCheck %s -check-prefix=R1
-; RUN: llc -march=mips -mcpu=mips32r2 -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
+; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s -check-prefix=R1
+; RUN: llc -march=mips -mattr=dspr2 < %s | FileCheck %s -check-prefix=R2
; R1-LABEL: test_lbux:
; R1: lbux ${{[0-9]+}}
diff --git a/test/CodeGen/Mips/llcarry.ll b/test/CodeGen/Mips/llcarry.ll
index b7cc6fc8ea75..fcf129420234 100644
--- a/test/CodeGen/Mips/llcarry.ll
+++ b/test/CodeGen/Mips/llcarry.ll
@@ -14,9 +14,9 @@ entry:
%add = add nsw i64 %1, %0
store i64 %add, i64* @k, align 8
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
-; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $24
+; 16: move ${{[0-9]+}}, $t8
+; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
ret void
}
@@ -28,8 +28,8 @@ entry:
%sub = sub nsw i64 %0, %1
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $24
-; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
+; 16: move ${{[0-9]+}}, $t8
+; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: subu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
store i64 %sub, i64* @l, align 8
ret void
@@ -41,7 +41,8 @@ entry:
%add = add nsw i64 %0, 15
; 16: addiu ${{[0-9]+}}, 15
; 16: sltu ${{[0-9]+}}, ${{[0-9]+}}
-; 16: move ${{[0-9]+}}, $24
+; 16: move ${{[0-9]+}}, $t8
+; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
; 16: addu ${{[0-9]+}}, ${{[0-9]+}}, ${{[0-9]+}}
store i64 %add, i64* @m, align 8
ret void
diff --git a/test/CodeGen/Mips/llvm-ir/add.ll b/test/CodeGen/Mips/llvm-ir/add.ll
index 63884eb03b8c..a5ecdda94ce2 100644
--- a/test/CodeGen/Mips/llvm-ir/add.ll
+++ b/test/CodeGen/Mips/llvm-ir/add.ll
@@ -1,35 +1,35 @@
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32,PRE4
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP32,GP32-CMOV
+; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN: -check-prefixes=ALL,R2-R6,GP32
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64,GP64-NOT-R2-R6
+; RUN: -check-prefixes=ALL,NOT-R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefixes=ALL,R2-R6,GP64,GP64-R2-R6
+; RUN: -check-prefixes=ALL,R2-R6,GP64
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -O2 -verify-machineinstrs | FileCheck %s \
-; RUN: -check-prefixes=ALL,MMR3,MM32
+; RUN: -check-prefixes=ALL,MMR6,MM32
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips -O2 | FileCheck %s \
; RUN: -check-prefixes=ALL,MMR6,MM32
; RUN: llc < %s -march=mips -mcpu=mips64r6 -target-abi n64 -mattr=+micromips -O2 | FileCheck %s \
-; RUN: -check-prefixes=ALL,MM64
+; RUN: -check-prefixes=ALL,MMR6,MM64
; FIXME: This code sequence is inefficient as it should be 'subu $[[T0]], $zero, $[[T0]'.
@@ -110,17 +110,17 @@ define signext i64 @add_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: add_i64:
- ; GP32-DAG: addu $[[T0:[0-9]+]], $4, $6
- ; GP32-DAG: addu $3, $5, $7
- ; GP32: sltu $[[T1:[0-9]+]], $3, $5
- ; GP32: addu $2, $[[T0]], $[[T1]]
+ ; GP32: addu $3, $5, $7
+ ; GP32: sltu $[[T0:[0-9]+]], $3, $7
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP32: addu $2, $4, $[[T1]]
; GP64: daddu $2, $4, $5
- ; MM32-DAG: addu16 $3, $5, $7
- ; MM32-DAG: addu16 $[[T0:[0-9]+]], $4, $6
- ; MM32: sltu $[[T1:[0-9]+]], $3, $5
- ; MM32: addu16 $2, $[[T0]], $[[T1]]
+ ; MM32: addu16 $3, $5, $7
+ ; MM32: sltu $[[T0:[0-9]+]], $3, $7
+ ; MM32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; MM32: addu $2, $4, $[[T1]]
; MM64: daddu $2, $4, $5
@@ -132,108 +132,49 @@ define signext i128 @add_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: add_i128:
- ; PRE4: move $[[R1:[0-9]+]], $5
- ; PRE4: move $[[R2:[0-9]+]], $4
- ; PRE4: lw $[[R3:[0-9]+]], 24($sp)
- ; PRE4: addu $[[R4:[0-9]+]], $6, $[[R3]]
- ; PRE4: lw $[[R5:[0-9]+]], 28($sp)
- ; PRE4: addu $[[R6:[0-9]+]], $7, $[[R5]]
- ; PRE4: sltu $[[R7:[0-9]+]], $[[R6]], $7
- ; PRE4: addu $[[R8:[0-9]+]], $[[R4]], $[[R7]]
- ; PRE4: xor $[[R9:[0-9]+]], $[[R8]], $6
- ; PRE4: sltiu $[[R10:[0-9]+]], $[[R9]], 1
- ; PRE4: bnez $[[R10]], $BB5_2
- ; PRE4: sltu $[[R7]], $[[R8]], $6
- ; PRE4: lw $[[R12:[0-9]+]], 20($sp)
- ; PRE4: addu $[[R13:[0-9]+]], $[[R1]], $[[R12]]
- ; PRE4: lw $[[R14:[0-9]+]], 16($sp)
- ; PRE4: addu $[[R15:[0-9]+]], $[[R13]], $[[R7]]
- ; PRE4: addu $[[R16:[0-9]+]], $[[R2]], $[[R14]]
- ; PRE4: sltu $[[R17:[0-9]+]], $[[R15]], $[[R13]]
- ; PRE4: sltu $[[R18:[0-9]+]], $[[R13]], $[[R1]]
- ; PRE4: addu $[[R19:[0-9]+]], $[[R16]], $[[R18]]
- ; PRE4: addu $2, $[[R19]], $[[R17]]
+ ; GP32: lw $[[T0:[0-9]+]], 28($sp)
+ ; GP32: addu $[[T1:[0-9]+]], $7, $[[T0]]
+ ; GP32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; GP32: lw $[[T3:[0-9]+]], 24($sp)
+ ; GP32: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; GP32: addu $[[T5:[0-9]+]], $6, $[[T4]]
+ ; GP32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; GP32: lw $[[T7:[0-9]+]], 20($sp)
+ ; GP32: addu $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+ ; GP32: lw $[[T9:[0-9]+]], 16($sp)
+ ; GP32: addu $3, $5, $[[T8]]
+ ; GP32: sltu $[[T10:[0-9]+]], $3, $[[T7]]
+ ; GP32: addu $[[T11:[0-9]+]], $[[T10]], $[[T9]]
+ ; GP32: addu $2, $4, $[[T11]]
+ ; GP32: move $4, $[[T5]]
+ ; GP32: move $5, $[[T1]]
- ; GP32-CMOV: lw $[[T0:[0-9]+]], 24($sp)
- ; GP32-CMOV: addu $[[T1:[0-9]+]], $6, $[[T0]]
- ; GP32-CMOV: lw $[[T2:[0-9]+]], 28($sp)
- ; GP32-CMOV: addu $[[T3:[0-9]+]], $7, $[[T2]]
- ; GP32-CMOV: sltu $[[T4:[0-9]+]], $[[T3]], $7
- ; GP32-CMOV: addu $[[T5:[0-9]+]], $[[T1]], $[[T4]]
- ; GP32-CMOV: sltu $[[T6:[0-9]+]], $[[T5]], $6
- ; GP32-CMOV: xor $[[T7:[0-9]+]], $[[T5]], $6
- ; GP32-CMOV: movz $[[T8:[0-9]+]], $[[T4]], $[[T7]]
- ; GP32-CMOV: lw $[[T9:[0-9]+]], 20($sp)
- ; GP32-CMOV: addu $[[T10:[0-9]+]], $5, $[[T4]]
- ; GP32-CMOV: addu $[[T11:[0-9]+]], $[[T10]], $[[T8]]
- ; GP32-CMOV: lw $[[T12:[0-9]+]], 16($sp)
- ; GP32-CMOV: sltu $[[T13:[0-9]+]], $[[T11]], $[[T10]]
- ; GP32-CMOV: addu $[[T14:[0-9]+]], $4, $[[T12]]
- ; GP32-CMOV: sltu $[[T15:[0-9]+]], $[[T10]], $5
- ; GP32-CMOV: addu $[[T16:[0-9]+]], $[[T14]], $[[T15]]
- ; GP32-CMOV: addu $[[T17:[0-9]+]], $[[T16]], $[[T13]]
- ; GP32-CMOV: move $4, $[[T5]]
- ; GP32-CMOV: move $5, $[[T3]]
+ ; GP64: daddu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $3, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: daddu $2, $4, $[[T1]]
- ; GP64: daddu $[[T0:[0-9]+]], $4, $6
- ; GP64: daddu $[[T1:[0-9]+]], $5, $7
- ; GP64: sltu $[[T2:[0-9]+]], $[[T1]], $5
- ; GP64-NOT-R2-R6: dsll $[[T3:[0-9]+]], $[[T2]], 32
- ; GP64-NOT-R2-R6: dsrl $[[T4:[0-9]+]], $[[T3]], 32
- ; GP64-R2-R6: dext $[[T4:[0-9]+]], $[[T2]], 0, 32
+ ; MM32: lw $[[T0:[0-9]+]], 28($sp)
+ ; MM32: addu $[[T1:[0-9]+]], $7, $[[T0]]
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+ ; MM32: lw $[[T3:[0-9]+]], 24($sp)
+ ; MM32: addu16 $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; MM32: addu16 $[[T5:[0-9]+]], $6, $[[T4]]
+ ; MM32: sltu $[[T6:[0-9]+]], $[[T5]], $[[T3]]
+ ; MM32: lw $[[T7:[0-9]+]], 20($sp)
+ ; MM32: addu16 $[[T8:[0-9]+]], $[[T6]], $[[T7]]
+ ; MM32: lw $[[T9:[0-9]+]], 16($sp)
+ ; MM32: addu16 $[[T10:[0-9]+]], $5, $[[T8]]
+ ; MM32: sltu $[[T11:[0-9]+]], $[[T10]], $[[T7]]
+ ; MM32: addu $[[T12:[0-9]+]], $[[T11]], $[[T9]]
+ ; MM32: addu16 $[[T13:[0-9]+]], $4, $[[T12]]
+ ; MM32: move $4, $[[T5]]
+ ; MM32: move $5, $[[T1]]
- ; GP64: daddu $2, $[[T0]], $[[T4]]
-
- ; MMR3: move $[[T1:[0-9]+]], $5
- ; MMR3-DAG: lw $[[T2:[0-9]+]], 32($sp)
- ; MMR3: addu16 $[[T3:[0-9]+]], $6, $[[T2]]
- ; MMR3-DAG: lw $[[T4:[0-9]+]], 36($sp)
- ; MMR3: addu16 $[[T5:[0-9]+]], $7, $[[T4]]
- ; MMR3: sltu $[[T6:[0-9]+]], $[[T5]], $7
- ; MMR3: addu16 $[[T7:[0-9]+]], $[[T3]], $[[T6]]
- ; MMR3: sltu $[[T8:[0-9]+]], $[[T7]], $6
- ; MMR3: xor $[[T9:[0-9]+]], $[[T7]], $6
- ; MMR3: movz $[[T8]], $[[T6]], $[[T9]]
- ; MMR3: lw $[[T10:[0-9]+]], 28($sp)
- ; MMR3: addu16 $[[T11:[0-9]+]], $[[T1]], $[[T10]]
- ; MMR3: addu16 $[[T12:[0-9]+]], $[[T11]], $[[T8]]
- ; MMR3: lw $[[T13:[0-9]+]], 24($sp)
- ; MMR3: sltu $[[T14:[0-9]+]], $[[T12]], $[[T11]]
- ; MMR3: addu16 $[[T15:[0-9]+]], $4, $[[T13]]
- ; MMR3: sltu $[[T16:[0-9]+]], $[[T11]], $[[T1]]
- ; MMR3: addu16 $[[T17:[0-9]+]], $[[T15]], $[[T16]]
- ; MMR3: addu16 $2, $2, $[[T14]]
-
- ; MMR6: move $[[T1:[0-9]+]], $5
- ; MMR6: move $[[T2:[0-9]+]], $4
- ; MMR6: lw $[[T3:[0-9]+]], 32($sp)
- ; MMR6: addu16 $[[T4:[0-9]+]], $6, $[[T3]]
- ; MMR6: lw $[[T5:[0-9]+]], 36($sp)
- ; MMR6: addu16 $[[T6:[0-9]+]], $7, $[[T5]]
- ; MMR6: sltu $[[T7:[0-9]+]], $[[T6]], $7
- ; MMR6: addu16 $[[T8:[0-9]+]], $[[T4]], $7
- ; MMR6: sltu $[[T9:[0-9]+]], $[[T8]], $6
- ; MMR6: xor $[[T10:[0-9]+]], $[[T4]], $6
- ; MMR6: sltiu $[[T11:[0-9]+]], $[[T10]], 1
- ; MMR6: seleqz $[[T12:[0-9]+]], $[[T9]], $[[T11]]
- ; MMR6: selnez $[[T13:[0-9]+]], $[[T7]], $[[T11]]
- ; MMR6: lw $[[T14:[0-9]+]], 24($sp)
- ; MMR6: or $[[T15:[0-9]+]], $[[T13]], $[[T12]]
- ; MMR6: addu16 $[[T16:[0-9]+]], $[[T2]], $[[T14]]
- ; MMR6: lw $[[T17:[0-9]+]], 28($sp)
- ; MMR6: addu16 $[[T18:[0-9]+]], $[[T1]], $[[T17]]
- ; MMR6: addu16 $[[T19:[0-9]+]], $[[T18]], $[[T15]]
- ; MMR6: sltu $[[T20:[0-9]+]], $[[T18]], $[[T1]]
- ; MMR6: sltu $[[T21:[0-9]+]], $[[T17]], $[[T18]]
- ; MMR6: addu16 $2, $[[T16]], $[[T20]]
- ; MMR6: addu16 $2, $[[T20]], $[[T21]]
-
- ; MM64: daddu $[[T0:[0-9]+]], $4, $6
; MM64: daddu $3, $5, $7
- ; MM64: sltu $[[T1:[0-9]+]], $3, $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $[[T0]], $[[T3]]
+ ; MM64: sltu $[[T0:[0-9]+]], $3, $7
+ ; MM64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; MM64: daddu $2, $4, $[[T1]]
%r = add i128 %a, %b
ret i128 %r
@@ -308,16 +249,17 @@ define signext i32 @add_i32_4(i32 signext %a) {
define signext i64 @add_i64_4(i64 signext %a) {
; ALL-LABEL: add_i64_4:
- ; GP32: addiu $3, $5, 4
- ; GP32: sltu $[[T0:[0-9]+]], $3, $5
- ; GP32: addu $2, $4, $[[T0]]
-
- ; MM32: addiur2 $[[T1:[0-9]+]], $5, 4
- ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $5
- ; MM32: addu16 $2, $4, $[[T2]]
+ ; GP32: addiu $[[T0:[0-9]+]], $5, 4
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 4
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP32: addu $2, $4, $[[T1]]
; GP64: daddiu $2, $4, 4
+ ; MM32: addiu $[[T0:[0-9]+]], $5, 4
+ ; MM32: li16 $[[T1:[0-9]+]], 4
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+ ; MM32: addu $2, $4, $[[T2]]
; MM64: daddiu $2, $4, 4
@@ -328,67 +270,38 @@ define signext i64 @add_i64_4(i64 signext %a) {
define signext i128 @add_i128_4(i128 signext %a) {
; ALL-LABEL: add_i128_4:
- ; PRE4: move $[[T0:[0-9]+]], $5
- ; PRE4: addiu $[[T1:[0-9]+]], $7, 4
- ; PRE4: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; PRE4: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; PRE4: bnez $[[T3]], $BB[[BB0:[0-9_]+]]
- ; PRE4: addu $[[T4:[0-9]+]], $6, $[[T2]]
- ; PRE4: sltu $[[T5:[0-9]+]], $[[T4]], $6
- ; PRE4; $BB[[BB0:[0-9]+]]:
- ; PRE4: addu $[[T6:[0-9]+]], $[[T0]], $[[T5]]
- ; PRE4: sltu $[[T7:[0-9]+]], $[[T6]], $[[T0]]
- ; PRE4: addu $[[T8:[0-9]+]], $4, $[[T7]]
- ; PRE4: move $4, $[[T4]]
-
- ; GP32-CMOV: addiu $[[T0:[0-9]+]], $7, 4
- ; GP32-CMOV: sltu $[[T1:[0-9]+]], $[[T0]], $7
- ; GP32-CMOV: addu $[[T2:[0-9]+]], $6, $[[T1]]
- ; GP32-CMOV: sltu $[[T3:[0-9]+]], $[[T2]], $6
- ; GP32-CMOV: movz $[[T3]], $[[T1]], $[[T1]]
- ; GP32-CMOV: addu $[[T4:[0-9]+]], $5, $[[T3]]
- ; GP32-CMOV: sltu $[[T5:[0-9]+]], $[[T4]], $5
- ; GP32-CMOV: addu $[[T7:[0-9]+]], $4, $[[T5]]
- ; GP32-CMOV: move $4, $[[T2]]
- ; GP32-CMOV: move $5, $[[T0]]
-
- ; GP64: daddiu $[[T0:[0-9]+]], $5, 4
- ; GP64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; GP64-NOT-R2-R6: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; GP64-R2-R6: dext $[[T3:[0-9]+]], $[[T1]], 0, 32
-
- ; GP64: daddu $2, $4, $[[T3]]
+ ; GP32: addiu $[[T0:[0-9]+]], $7, 4
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 4
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP32: addu $[[T2:[0-9]+]], $6, $[[T1]]
+ ; GP32: sltu $[[T1]], $[[T2]], $zero
+ ; GP32: addu $[[T3:[0-9]+]], $5, $[[T1]]
+ ; GP32: sltu $[[T1]], $[[T3]], $zero
+ ; GP32: addu $[[T1]], $4, $[[T1]]
+ ; GP32: move $4, $[[T2]]
+ ; GP32: move $5, $[[T0]]
- ; MMR3: addiur2 $[[T0:[0-9]+]], $7, 4
- ; MMR3: sltu $[[T1:[0-9]+]], $[[T0]], $7
- ; MMR3: sltu $[[T2:[0-9]+]], $[[T0]], $7
- ; MMR3: addu16 $[[T3:[0-9]+]], $6, $[[T2]]
- ; MMR3: sltu $[[T4:[0-9]+]], $[[T3]], $6
- ; MMR3: movz $[[T4]], $[[T2]], $[[T1]]
- ; MMR3: addu16 $[[T6:[0-9]+]], $5, $[[T4]]
- ; MMR3: sltu $[[T7:[0-9]+]], $[[T6]], $5
- ; MMR3: addu16 $2, $4, $[[T7]]
+ ; GP64: daddiu $[[T0:[0-9]+]], $5, 4
+ ; GP64: daddiu $[[T1:[0-9]+]], $zero, 4
+ ; GP64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP64: daddu $2, $4, $[[T1]]
- ; MMR6: addiur2 $[[T1:[0-9]+]], $7, 4
- ; MMR6: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; MMR6: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; MMR6: selnez $[[T4:[0-9]+]], $[[T2]], $[[T3]]
- ; MMR6: addu16 $[[T5:[0-9]+]], $6, $[[T2]]
- ; MMR6: sltu $[[T6:[0-9]+]], $[[T5]], $6
- ; MMR6: seleqz $[[T7:[0-9]+]], $[[T6]], $[[T3]]
- ; MMR6: or $[[T8:[0-9]+]], $[[T4]], $[[T7]]
- ; MMR6: addu16 $[[T9:[0-9]+]], $5, $[[T8]]
- ; MMR6: sltu $[[T10:[0-9]+]], $[[T9]], $5
- ; MMR6: addu16 $[[T11:[0-9]+]], $4, $[[T10]]
- ; MMR6: move $4, $7
- ; MMR6: move $5, $[[T1]]
+ ; MM32: addiu $[[T0:[0-9]+]], $7, 4
+ ; MM32: li16 $[[T1:[0-9]+]], 4
+ ; MM32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM32: addu16 $[[T2:[0-9]+]], $6, $[[T1]]
+ ; MM32: li16 $[[T1]], 0
+ ; MM32: sltu $[[T3:[0-9]+]], $[[T2]], $[[T1]]
+ ; MM32: addu16 $[[T3]], $5, $[[T3]]
+ ; MM32: sltu $[[T1]], $[[T3]], $[[T1]]
+ ; MM32: addu16 $[[T1]], $4, $[[T1]]
+ ; MM32: move $4, $[[T2]]
+ ; MM32: move $5, $[[T0]]
; MM64: daddiu $[[T0:[0-9]+]], $5, 4
- ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $4, $[[T3]]
+ ; MM64: daddiu $[[T1:[0-9]+]], $zero, 4
+ ; MM64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM64: daddu $2, $4, $[[T1]]
%r = add i128 4, %a
ret i128 %r
@@ -467,15 +380,16 @@ define signext i64 @add_i64_3(i64 signext %a) {
; ALL-LABEL: add_i64_3:
; GP32: addiu $[[T0:[0-9]+]], $5, 3
- ; GP32: sltu $[[T1:[0-9]+]], $[[T0]], $5
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 3
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
; GP32: addu $2, $4, $[[T1]]
; GP64: daddiu $2, $4, 3
- ; MM32: move $[[T1:[0-9]+]], $5
- ; MM32: addius5 $[[T1]], 3
- ; MM32: sltu $[[T2:[0-9]+]], $[[T1]], $5
- ; MM32: addu16 $2, $4, $[[T2]]
+ ; MM32: addiu $[[T0:[0-9]+]], $5, 3
+ ; MM32: li16 $[[T1:[0-9]+]], 3
+ ; MM32: sltu $[[T2:[0-9]+]], $[[T0]], $[[T1]]
+ ; MM32: addu $2, $4, $[[T2]]
; MM64: daddiu $2, $4, 3
@@ -486,70 +400,38 @@ define signext i64 @add_i64_3(i64 signext %a) {
define signext i128 @add_i128_3(i128 signext %a) {
; ALL-LABEL: add_i128_3:
- ; PRE4: move $[[T0:[0-9]+]], $5
- ; PRE4: addiu $[[T1:[0-9]+]], $7, 3
- ; PRE4: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; PRE4: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; PRE4: bnez $[[T3]], $BB[[BB0:[0-9_]+]]
- ; PRE4: addu $[[T4:[0-9]+]], $6, $[[T2]]
- ; PRE4: sltu $[[T5:[0-9]+]], $[[T4]], $6
- ; PRE4; $BB[[BB0:[0-9]+]]:
- ; PRE4: addu $[[T6:[0-9]+]], $[[T0]], $[[T5]]
- ; PRE4: sltu $[[T7:[0-9]+]], $[[T6]], $[[T0]]
- ; PRE4: addu $[[T8:[0-9]+]], $4, $[[T7]]
- ; PRE4: move $4, $[[T4]]
-
- ; GP32-CMOV: addiu $[[T0:[0-9]+]], $7, 3
- ; GP32-CMOV: sltu $[[T1:[0-9]+]], $[[T0]], $7
- ; GP32-CMOV: addu $[[T2:[0-9]+]], $6, $[[T1]]
- ; GP32-CMOV: sltu $[[T3:[0-9]+]], $[[T2]], $6
- ; GP32-CMOV: movz $[[T3]], $[[T1]], $[[T1]]
- ; GP32-CMOV: addu $[[T4:[0-9]+]], $5, $[[T3]]
- ; GP32-CMOV: sltu $[[T5:[0-9]+]], $[[T4]], $5
- ; GP32-CMOV: addu $[[T7:[0-9]+]], $4, $[[T5]]
- ; GP32-CMOV: move $4, $[[T2]]
- ; GP32-CMOV: move $5, $[[T0]]
-
- ; GP64: daddiu $[[T0:[0-9]+]], $5, 3
- ; GP64: sltu $[[T1:[0-9]+]], $[[T0]], $5
-
- ; GP64-NOT-R2-R6: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; GP64-NOT-R2-R6: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; GP64-R2-R6: dext $[[T3:[0-9]+]], $[[T1]], 0, 32
-
- ; GP64: daddu $2, $4, $[[T3]]
+ ; GP32: addiu $[[T0:[0-9]+]], $7, 3
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 3
+ ; GP32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP32: addu $[[T2:[0-9]+]], $6, $[[T1]]
+ ; GP32: sltu $[[T3:[0-9]+]], $[[T2]], $zero
+ ; GP32: addu $[[T4:[0-9]+]], $5, $[[T3]]
+ ; GP32: sltu $[[T5:[0-9]+]], $[[T4]], $zero
+ ; GP32: addu $[[T5]], $4, $[[T5]]
+ ; GP32: move $4, $[[T2]]
+ ; GP32: move $5, $[[T0]]
- ; MMR3: move $[[T1:[0-9]+]], $7
- ; MMR3: addius5 $[[T1]], 3
- ; MMR3: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; MMR3: sltu $[[T3:[0-9]+]], $[[T1]], $7
- ; MMR3: addu16 $[[T4:[0-9]+]], $6, $[[T3]]
- ; MMR3: sltu $[[T5:[0-9]+]], $[[T4]], $6
- ; MMR3: movz $[[T5]], $[[T3]], $[[T2]]
- ; MMR3: addu16 $[[T6:[0-9]+]], $5, $[[T5]]
- ; MMR3: sltu $[[T7:[0-9]+]], $[[T6]], $5
- ; MMR3: addu16 $2, $4, $[[T7]]
+ ; GP64: daddiu $[[T0:[0-9]+]], $5, 3
+ ; GP64: daddiu $[[T1:[0-9]+]], $zero, 3
+ ; GP64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; GP64: daddu $2, $4, $[[T1]]
- ; MMR6: move $[[T1:[0-9]+]], $7
- ; MMR6: addius5 $[[T1]], 3
- ; MMR6: sltu $[[T2:[0-9]+]], $[[T1]], $7
- ; MMR6: xori $[[T3:[0-9]+]], $[[T2]], 1
- ; MMR6: selnez $[[T4:[0-9]+]], $[[T2]], $[[T3]]
- ; MMR6: addu16 $[[T5:[0-9]+]], $6, $[[T2]]
- ; MMR6: sltu $[[T6:[0-9]+]], $[[T5]], $6
- ; MMR6: seleqz $[[T7:[0-9]+]], $[[T6]], $[[T3]]
- ; MMR6: or $[[T8:[0-9]+]], $[[T4]], $[[T7]]
- ; MMR6: addu16 $[[T9:[0-9]+]], $5, $[[T8]]
- ; MMR6: sltu $[[T10:[0-9]+]], $[[T9]], $5
- ; MMR6: addu16 $[[T11:[0-9]+]], $4, $[[T10]]
- ; MMR6: move $4, $[[T5]]
- ; MMR6: move $5, $[[T1]]
+ ; MM32: addiu $[[T0:[0-9]+]], $7, 3
+ ; MM32: li16 $[[T1:[0-9]+]], 3
+ ; MM32: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM32: addu16 $[[T2:[0-9]+]], $6, $[[T1]]
+ ; MM32: li16 $[[T3:[0-9]+]], 0
+ ; MM32: sltu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+ ; MM32: addu16 $[[T4]], $5, $[[T4]]
+ ; MM32: sltu $[[T5:[0-9]+]], $[[T4]], $[[T3]]
+ ; MM32: addu16 $[[T5]], $4, $[[T5]]
+ ; MM32: move $4, $[[T2]]
+ ; MM32: move $5, $[[T0]]
; MM64: daddiu $[[T0:[0-9]+]], $5, 3
- ; MM64: sltu $[[T1:[0-9]+]], $[[T0]], $5
- ; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
- ; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
- ; MM64: daddu $2, $4, $[[T3]]
+ ; MM64: daddiu $[[T1:[0-9]+]], $zero, 3
+ ; MM64: sltu $[[T1]], $[[T0]], $[[T1]]
+ ; MM64: daddu $2, $4, $[[T1]]
%r = add i128 3, %a
ret i128 %r
diff --git a/test/CodeGen/Mips/llvm-ir/sub.ll b/test/CodeGen/Mips/llvm-ir/sub.ll
index 655addb10a64..a730063c552f 100644
--- a/test/CodeGen/Mips/llvm-ir/sub.ll
+++ b/test/CodeGen/Mips/llvm-ir/sub.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM,PRE4
+; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
; RUN: -check-prefixes=NOT-R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
@@ -11,25 +11,25 @@
; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
; RUN: -check-prefixes=R2-R6,GP32,GP32-NOT-MM,NOT-MM
; RUN: llc < %s -march=mips -mcpu=mips32r3 -mattr=+micromips -verify-machineinstrs | FileCheck %s \
-; RUN: -check-prefixes=GP32-MM,GP32,MM32,MMR3
+; RUN: -check-prefixes=GP32-MM,GP32,MM
; RUN: llc < %s -march=mips -mcpu=mips32r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=GP32-MM,GP32,MM32,MMR6
+; RUN: -check-prefixes=GP32-MM,GP32,MM
; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
-; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM,GP64-NOT-R2
+; RUN: -check-prefixes=NOT-R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
-; RUN: -check-prefixes=R2-R6,GP64,NOT-MM,GP64-R2
+; RUN: -check-prefixes=R2-R6,GP64,NOT-MM
; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -mattr=+micromips | FileCheck %s \
-; RUN: -check-prefixes=GP64,MM64
+; RUN: -check-prefixes=GP64,MM
define signext i1 @sub_i1(i1 signext %a, i1 signext %b) {
entry:
@@ -100,15 +100,10 @@ define signext i64 @sub_i64(i64 signext %a, i64 signext %b) {
entry:
; ALL-LABEL: sub_i64:
- ; GP32-NOT-MM: sltu $[[T0:[0-9]+]], $5, $7
- ; GP32-NOT-MM: subu $2, $4, $6
- ; GP32-NOT-MM: subu $2, $2, $[[T0]]
- ; GP32-NOT-MM: subu $3, $5, $7
-
- ; MM32: sltu $[[T0:[0-9]+]], $5, $7
- ; MM32: subu16 $3, $4, $6
- ; MM32: subu16 $2, $3, $[[T0]]
- ; MM32: subu16 $3, $5, $7
+ ; GP32-NOT-MM subu $3, $5, $7
+ ; GP32: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP32: addu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP32: subu $2, $4, $[[T1]]
; GP64: dsubu $2, $4, $5
@@ -120,109 +115,42 @@ define signext i128 @sub_i128(i128 signext %a, i128 signext %b) {
entry:
; ALL-LABEL: sub_i128:
-; PRE4: lw $[[T0:[0-9]+]], 24($sp)
-; PRE4: lw $[[T1:[0-9]+]], 28($sp)
-; PRE4: sltu $[[T2:[0-9]+]], $7, $[[T1]]
-; PRE4: xor $[[T3:[0-9]+]], $6, $[[T0]]
-; PRE4: sltiu $[[T4:[0-9]+]], $[[T3]], 1
-; PRE4: bnez $[[T4]]
-; PRE4: move $[[T5:[0-9]+]], $[[T2]]
-; PRE4: sltu $[[T5]], $6, $[[T0]]
-
-; PRE4: lw $[[T6:[0-9]+]], 20($sp)
-; PRE4: subu $[[T7:[0-9]+]], $5, $[[T6]]
-; PRE4: subu $[[T8:[0-9]+]], $[[T7]], $[[T5]]
-; PRE4: sltu $[[T9:[0-9]+]], $[[T7]], $[[T5]]
-; PRE4: sltu $[[T10:[0-9]+]], $5, $[[T6]]
-; PRE4: lw $[[T11:[0-9]+]], 16($sp)
-; PRE4: subu $[[T12:[0-9]+]], $4, $[[T11]]
-; PRE4: subu $[[T13:[0-9]+]], $[[T12]], $[[T10]]
-; PRE4: subu $[[T14:[0-9]+]], $[[T13]], $[[T9]]
-; PRE4: subu $[[T15:[0-9]+]], $6, $[[T0]]
-; PRE4: subu $[[T16:[0-9]+]], $[[T15]], $[[T2]]
-; PRE4: subu $5, $7, $[[T1]]
-
-; MMR3: lw $[[T1:[0-9]+]], 48($sp)
-; MMR3: sltu $[[T2:[0-9]+]], $6, $[[T1]]
-; MMR3: xor $[[T3:[0-9]+]], $6, $[[T1]]
-; MMR3: lw $[[T4:[0-9]+]], 52($sp)
-; MMR3: sltu $[[T5:[0-9]+]], $7, $[[T4]]
-; MMR3: movz $[[T6:[0-9]+]], $[[T5]], $[[T3]]
-; MMR3: lw $[[T7:[0-8]+]], 44($sp)
-; MMR3: subu16 $[[T8:[0-9]+]], $5, $[[T7]]
-; MMR3: subu16 $[[T9:[0-9]+]], $[[T8]], $[[T6]]
-; MMR3: sltu $[[T10:[0-9]+]], $[[T8]], $[[T2]]
-; MMR3: sltu $[[T11:[0-9]+]], $5, $[[T7]]
-; MMR3: lw $[[T12:[0-9]+]], 40($sp)
-; MMR3: lw $[[T13:[0-9]+]], 12($sp)
-; MMR3: subu16 $[[T14:[0-9]+]], $[[T13]], $[[T12]]
-; MMR3: subu16 $[[T15:[0-9]+]], $[[T14]], $[[T11]]
-; MMR3: subu16 $[[T16:[0-9]+]], $[[T15]], $[[T10]]
-; MMR3: subu16 $[[T17:[0-9]+]], $6, $[[T1]]
-; MMR3: subu16 $[[T18:[0-9]+]], $[[T17]], $7
-; MMR3: lw $[[T19:[0-9]+]], 8($sp)
-; MMR3: lw $[[T20:[0-9]+]], 0($sp)
-; MMR3: subu16 $5, $[[T19]], $[[T20]]
-
-; MMR6: move $[[T0:[0-9]+]], $7
-; MMR6: sw $[[T0]], 8($sp)
-; MMR6: move $[[T1:[0-9]+]], $5
-; MMR6: sw $4, 12($sp)
-; MMR6: lw $[[T2:[0-9]+]], 48($sp)
-; MMR6: sltu $[[T3:[0-9]+]], $6, $[[T2]]
-; MMR6: xor $[[T4:[0-9]+]], $6, $[[T2]]
-; MMR6: sltiu $[[T5:[0-9]+]], $[[T4]], 1
-; MMR6: seleqz $[[T6:[0-9]+]], $[[T3]], $[[T5]]
-; MMR6: lw $[[T7:[0-9]+]], 52($sp)
-; MMR6: sltu $[[T8:[0-9]+]], $[[T0]], $[[T7]]
-; MMR6: selnez $[[T9:[0-9]+]], $[[T8]], $[[T5]]
-; MMR6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]]
-; MMR6: lw $[[T11:[0-9]+]], 44($sp)
-; MMR6: subu16 $[[T12:[0-9]+]], $[[T1]], $[[T11]]
-; MMR6: subu16 $[[T13:[0-9]+]], $[[T12]], $[[T7]]
-; MMR6: sltu $[[T16:[0-9]+]], $[[T12]], $[[T7]]
-; MMR6: sltu $[[T17:[0-9]+]], $[[T1]], $[[T11]]
-; MMR6: lw $[[T18:[0-9]+]], 40($sp)
-; MMR6: lw $[[T19:[0-9]+]], 12($sp)
-; MMR6: subu16 $[[T20:[0-9]+]], $[[T19]], $[[T18]]
-; MMR6: subu16 $[[T21:[0-9]+]], $[[T20]], $[[T17]]
-; MMR6: subu16 $[[T22:[0-9]+]], $[[T21]], $[[T16]]
-; MMR6: subu16 $[[T23:[0-9]+]], $6, $[[T2]]
-; MMR6: subu16 $4, $[[T23]], $5
-; MMR6: lw $[[T24:[0-9]+]], 8($sp)
-; MMR6: lw $[[T25:[0-9]+]], 0($sp)
-; MMR6: subu16 $5, $[[T24]], $[[T25]]
-; MMR6: lw $3, 4($sp)
-
-; FIXME: The sltu, dsll, dsrl pattern here occurs when an i32 is zero
-; extended to 64 bits. Fortunately slt(i)(u) actually gives an i1.
-; These should be combined away.
-
-; GP64-NOT-R2: dsubu $1, $4, $6
-; GP64-NOT-R2: sltu $[[T0:[0-9]+]], $5, $7
-; GP64-NOT-R2: dsll $[[T1:[0-9]+]], $[[T0]], 32
-; GP64-NOT-R2: dsrl $[[T2:[0-9]+]], $[[T1]], 32
-; GP64-NOT-R2: dsubu $2, $1, $[[T2]]
-; GP64-NOT-R2: dsubu $3, $5, $7
-
-; FIXME: Likewise for the sltu, dext here.
-
-; GP64-R2: dsubu $1, $4, $6
-; GP64-R2: sltu $[[T0:[0-9]+]], $5, $7
-; GP64-R2: dext $[[T1:[0-9]+]], $[[T0]], 0, 32
-; GP64-R2: dsubu $2, $1, $[[T1]]
-; GP64-R2: dsubu $3, $5, $7
+ ; GP32-NOT-MM: lw $[[T0:[0-9]+]], 20($sp)
+ ; GP32-NOT-MM: sltu $[[T1:[0-9]+]], $5, $[[T0]]
+ ; GP32-NOT-MM: lw $[[T2:[0-9]+]], 16($sp)
+ ; GP32-NOT-MM: addu $[[T3:[0-9]+]], $[[T1]], $[[T2]]
+ ; GP32-NOT-MM: lw $[[T4:[0-9]+]], 24($sp)
+ ; GP32-NOT-MM: lw $[[T5:[0-9]+]], 28($sp)
+ ; GP32-NOT-MM: subu $[[T6:[0-9]+]], $7, $[[T5]]
+ ; GP32-NOT-MM: subu $2, $4, $[[T3]]
+ ; GP32-NOT-MM: sltu $[[T8:[0-9]+]], $6, $[[T4]]
+ ; GP32-NOT-MM: addu $[[T9:[0-9]+]], $[[T8]], $[[T0]]
+ ; GP32-NOT-MM: subu $3, $5, $[[T9]]
+ ; GP32-NOT-MM: sltu $[[T10:[0-9]+]], $7, $[[T5]]
+ ; GP32-NOT-MM: addu $[[T11:[0-9]+]], $[[T10]], $[[T4]]
+ ; GP32-NOT-MM: subu $4, $6, $[[T11]]
+ ; GP32-NOT-MM: move $5, $[[T6]]
-; FIXME: Again, redundant sign extension. Also, microMIPSR6 has the
-; dext instruction which should be used here.
+ ; GP32-MM: lw $[[T0:[0-9]+]], 20($sp)
+ ; GP32-MM: sltu $[[T1:[0-9]+]], $[[T2:[0-9]+]], $[[T0]]
+ ; GP32-MM: lw $[[T3:[0-9]+]], 16($sp)
+ ; GP32-MM: addu $[[T3]], $[[T1]], $[[T3]]
+ ; GP32-MM: lw $[[T4:[0-9]+]], 24($sp)
+ ; GP32-MM: lw $[[T5:[0-9]+]], 28($sp)
+ ; GP32-MM: subu $[[T1]], $7, $[[T5]]
+ ; GP32-MM: subu16 $[[T3]], $[[T6:[0-9]+]], $[[T3]]
+ ; GP32-MM: sltu $[[T6]], $6, $[[T4]]
+ ; GP32-MM: addu16 $[[T0]], $[[T6]], $[[T0]]
+ ; GP32-MM: subu16 $[[T0]], $5, $[[T0]]
+ ; GP32-MM: sltu $[[T6]], $7, $[[T5]]
+ ; GP32-MM: addu $[[T6]], $[[T6]], $[[T4]]
+ ; GP32-MM: subu16 $[[T6]], $6, $[[T6]]
+ ; GP32-MM: move $[[T2]], $[[T1]]
-; MM64: dsubu $[[T0:[0-9]+]], $4, $6
-; MM64: sltu $[[T1:[0-9]+]], $5, $7
-; MM64: dsll $[[T2:[0-9]+]], $[[T1]], 32
-; MM64: dsrl $[[T3:[0-9]+]], $[[T2]], 32
-; MM64: dsubu $2, $[[T0]], $[[T3]]
-; MM64: dsubu $3, $5, $7
-; MM64: jr $ra
+ ; GP64: dsubu $3, $5, $7
+ ; GP64: sltu $[[T0:[0-9]+]], $5, $7
+ ; GP64: daddu $[[T1:[0-9]+]], $[[T0]], $6
+ ; GP64: dsubu $2, $4, $[[T1]]
%r = sub i128 %a, %b
ret i128 %r
diff --git a/test/CodeGen/Mips/madd-msub.ll b/test/CodeGen/Mips/madd-msub.ll
index 3e1a2e8b9708..7baba005a072 100644
--- a/test/CodeGen/Mips/madd-msub.ll
+++ b/test/CodeGen/Mips/madd-msub.ll
@@ -25,11 +25,11 @@
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
-; 32R6-DAG: muh $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sra $[[T4:[0-9]+]], $6, 31
-; 32R6-DAG: addu $[[T5:[0-9]+]], $[[T3]], $[[T4]]
-; 32R6-DAG: addu $2, $[[T5]], $[[T2]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
+; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $[[T3]]
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -71,7 +71,7 @@ entry:
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $6
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $[[T0]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $6
; FIXME: There's a redundant move here. We should remove it
; 32R6-DAG: muhu $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $2, $[[T3]], $[[T2]]
@@ -109,10 +109,10 @@ entry:
; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
; 32R6-DAG: addu $[[T1:[0-9]+]], $[[T0]], $7
-; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $1
-; 32R6-DAG: muh $[[T3:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $6
-; 32R6-DAG: addu $2, $[[T4]], $[[T2]]
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $[[T1]], $7
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T2]], $6
+; 32R6-DAG: muh $[[T5:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: addu $2, $[[T5]], $[[T4]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -134,17 +134,6 @@ entry:
ret i64 %add
}
-; ALL-LABEL: madd4
-; ALL-NOT: madd ${{[0-9]+}}, ${{[0-9]+}}
-
-define i32 @madd4(i32 %a, i32 %b, i32 %c) {
-entry:
- %mul = mul nsw i32 %a, %b
- %add = add nsw i32 %c, %mul
-
- ret i32 %add
-}
-
; ALL-LABEL: msub1:
; 32-DAG: sra $[[T0:[0-9]+]], $6, 31
@@ -159,13 +148,13 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T1:[0-9]+]], $6, $[[T0]]
-; 32R6-DAG: muh $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sra $[[T3:[0-9]+]], $6, 31
-; 32R6-DAG: subu $[[T4:[0-9]+]], $[[T3]], $[[T2]]
-; 32R6-DAG: subu $2, $[[T4]], $[[T1]]
-; 32R6-DAG: subu $3, $6, $[[T0]]
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T3:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T4:[0-9]+]], $[[T3]], $[[T0]]
+; 32R6-DAG: sra $[[T5:[0-9]+]], $6, 31
+; 32R6-DAG: subu $2, $[[T5]], $[[T4]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -205,12 +194,13 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T1:[0-9]+]], $6, $[[T0]]
-; 32R6-DAG: muhu $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: negu $[[T3:[0-9]+]], $[[T2]]
-; 32R6-DAG: subu $2, $[[T3]], $[[T1]]
-; 32R6-DAG: subu $3, $6, $[[T0]]
+; 32R6-DAG: muhu $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $6, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: negu $2, $[[T3]]
+; 32R6-DAG: subu $3, $6, $[[T1]]
; 64-DAG: d[[m:m]]ult $5, $4
; 64-DAG: [[m]]flo $[[T0:[0-9]+]]
@@ -244,12 +234,12 @@ entry:
; DSP-DAG: mfhi $2, $[[AC]]
; DSP-DAG: mflo $3, $[[AC]]
-; 32R6-DAG: mul $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: sltu $[[T1:[0-9]+]], $7, $[[T0]]
-; 32R6-DAG: muh $[[T2:[0-9]+]], ${{[45]}}, ${{[45]}}
-; 32R6-DAG: subu $[[T3:[0-9]+]], $6, $[[T2]]
-; 32R6-DAG: subu $2, $[[T3]], $[[T1]]
-; 32R6-DAG: subu $3, $7, $[[T0]]
+; 32R6-DAG: muh $[[T0:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: mul $[[T1:[0-9]+]], ${{[45]}}, ${{[45]}}
+; 32R6-DAG: sltu $[[T2:[0-9]+]], $7, $[[T1]]
+; 32R6-DAG: addu $[[T3:[0-9]+]], $[[T2]], $[[T0]]
+; 32R6-DAG: subu $2, $6, $[[T3]]
+; 32R6-DAG: subu $3, $7, $[[T1]]
; 64-DAG: sll $[[T0:[0-9]+]], $4, 0
; 64-DAG: sll $[[T1:[0-9]+]], $5, 0
@@ -270,14 +260,3 @@ entry:
%sub = sub nsw i64 %c, %mul
ret i64 %sub
}
-
-; ALL-LABEL: msub4
-; ALL-NOT: msub ${{[0-9]+}}, ${{[0-9]+}}
-
-define i32 @msub4(i32 %a, i32 %b, i32 %c) {
-entry:
- %mul = mul nsw i32 %a, %b
- %sub = sub nsw i32 %c, %mul
-
- ret i32 %sub
-}
diff --git a/test/CodeGen/NVPTX/lower-aggr-copies.ll b/test/CodeGen/NVPTX/lower-aggr-copies.ll
index 192d4becb059..f522c6722ee6 100644
--- a/test/CodeGen/NVPTX/lower-aggr-copies.ll
+++ b/test/CodeGen/NVPTX/lower-aggr-copies.ll
@@ -17,6 +17,8 @@ entry:
ret i8* %dst
; IR-LABEL: @memcpy_caller
+; IR: [[CMPREG:%[0-9]+]] = icmp eq i64 0, %n
+; IR: br i1 [[CMPREG]], label %split, label %loadstoreloop
; IR: loadstoreloop:
; IR: [[LOADPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %src, i64
; IR-NEXT: [[VAL:%[0-9]+]] = load i8, i8* [[LOADPTR]]
@@ -73,6 +75,8 @@ entry:
; IR-LABEL: @memset_caller
; IR: [[VAL:%[0-9]+]] = trunc i32 %c to i8
+; IR: [[CMPREG:%[0-9]+]] = icmp eq i64 0, %n
+; IR: br i1 [[CMPREG]], label %split, label %loadstoreloop
; IR: loadstoreloop:
; IR: [[STOREPTR:%[0-9]+]] = getelementptr inbounds i8, i8* %dst, i64
; IR-NEXT: store i8 [[VAL]], i8* [[STOREPTR]]
diff --git a/test/CodeGen/PowerPC/anon_aggr.ll b/test/CodeGen/PowerPC/anon_aggr.ll
index 9b32a8f55f34..2c1735844477 100644
--- a/test/CodeGen/PowerPC/anon_aggr.ll
+++ b/test/CodeGen/PowerPC/anon_aggr.ll
@@ -1,6 +1,6 @@
; RUN: llc -verify-machineinstrs -O0 -mcpu=ppc64 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -O0 -mcpu=g4 -mtriple=powerpc-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN32 %s
-; RUN: llc -verify-machineinstrs -O0 -mcpu=ppc970 -mtriple=powerpc64-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN64 %s
+; RUN: llc -verify-machineinstrs -O0 -mcpu=970 -mtriple=powerpc64-apple-darwin8 < %s | FileCheck -check-prefix=DARWIN64 %s
; Test case for PR 14779: anonymous aggregates are not handled correctly.
; Darwin bug report PR 15821 is similar.
@@ -22,7 +22,7 @@ unequal:
; CHECK-LABEL: func1:
; CHECK: cmpld {{([0-9]+,)?}}4, 5
-; CHECK-DAG: std 4, -[[OFFSET1:[0-9]+]]
+; CHECK-DAG: std 3, -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 5, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
@@ -31,19 +31,19 @@ unequal:
; DARWIN32: mr
; DARWIN32: mr r[[REG1:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN32: mr r[[REG2:[0-9]+]], r[[REGB:[0-9]+]]
-; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGA]], r[[REGB]]
+; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGB]], r[[REGA]]
; DARWIN32: stw r[[REG1]], -[[OFFSET1:[0-9]+]]
; DARWIN32: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r3, -[[OFFSET1]]
; DARWIN32: lwz r3, -[[OFFSET2]]
+; DARWIN32: lwz r3, -[[OFFSET1]]
; DARWIN64: _func1:
; DARWIN64: mr
; DARWIN64: mr r[[REG1:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN64: mr r[[REG2:[0-9]+]], r[[REGB:[0-9]+]]
-; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REGB]]
-; DARWIN64: std r[[REG1]], -[[OFFSET1:[0-9]+]]
-; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGB]], r[[REGA]]
+; DARWIN64: std r[[REG1]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: std r[[REG2]], -[[OFFSET1:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
; DARWIN64: ld r3, -[[OFFSET2]]
@@ -61,19 +61,19 @@ unequal:
ret i8* %array2_ptr
}
; CHECK-LABEL: func2:
-; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK-DAG: cmpld {{([0-9]+,)?}}4, 6
; CHECK-DAG: std 6, 72(1)
; CHECK-DAG: std 5, 64(1)
; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]]
-; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]]
+; CHECK-DAG: std 5, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
; DARWIN32-LABEL: _func2
-; DARWIN32-DAG: addi r[[REG8:[0-9]+]], r[[REGSP:[0-9]+]], 36
-; DARWIN32-DAG: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
; DARWIN32: mr
+; DARWIN32: addi r[[REG8:[0-9]+]], r[[REGSP:[0-9]+]], 36
; DARWIN32: mr r[[REG7:[0-9]+]], r5
+; DARWIN32: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
; DARWIN32-DAG: cmplw {{(cr[0-9]+,)?}}r5, r[[REG2]]
; DARWIN32-DAG: stw r[[REG7]], -[[OFFSET1:[0-9]+]]
; DARWIN32-DAG: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
@@ -82,9 +82,9 @@ unequal:
; DARWIN64: _func2:
-; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: mr
; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
+; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
@@ -107,9 +107,9 @@ unequal:
}
; CHECK-LABEL: func3:
-; CHECK: cmpld {{([0-9]+,)?}}4, 6
-; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]](1)
-; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]](1)
+; CHECK-DAG: cmpld {{([0-9]+,)?}}3, 4
+; CHECK-DAG: std 3, -[[OFFSET2:[0-9]+]](1)
+; CHECK-DAG: std 4, -[[OFFSET1:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
@@ -127,13 +127,13 @@ unequal:
; DARWIN32-DAG: lwz r3, -[[OFFSET2:[0-9]+]]
; DARWIN64: _func3:
-; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1)
-; DARWIN64: ld r[[REG4:[0-9]+]], 56(r1)
+; DARWIN64-DAG: ld r[[REG3:[0-9]+]], 72(r1)
+; DARWIN64-DAG: ld r[[REG4:[0-9]+]], 56(r1)
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
-; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
-; DARWIN64: std r[[REG4]], -[[OFFSET2:[0-9]+]]
-; DARWIN64: ld r3, -[[OFFSET2]]
+; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
+; DARWIN64: std r[[REG3]], -[[OFFSET2:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
+; DARWIN64: ld r3, -[[OFFSET2]]
define i8* @func4(i64 %p1, i64 %p2, i64 %p3, i64 %p4,
@@ -152,31 +152,31 @@ unequal:
}
; CHECK-LABEL: func4:
-; CHECK: ld [[REG3:[0-9]+]], 136(1)
-; CHECK: ld [[REG2:[0-9]+]], 120(1)
-; CHECK: cmpld {{([0-9]+,)?}}[[REG2]], [[REG3]]
-; CHECK: std [[REG3]], -[[OFFSET2:[0-9]+]](1)
+; CHECK-DAG: ld [[REG2:[0-9]+]], 120(1)
+; CHECK-DAG: ld [[REG3:[0-9]+]], 136(1)
+; CHECK-DAG: cmpld {{([0-9]+,)?}}[[REG2]], [[REG3]]
; CHECK: std [[REG2]], -[[OFFSET1:[0-9]+]](1)
+; CHECK: std [[REG3]], -[[OFFSET2:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
; DARWIN32: _func4:
; DARWIN32: lwz r[[REG4:[0-9]+]], 96(r1)
; DARWIN32: addi r[[REG1:[0-9]+]], r1, 100
-; DARWIN32: lwz r[[REG3:[0-9]+]], 108(r1)
; DARWIN32: mr r[[REG2:[0-9]+]], r[[REG4]]
+; DARWIN32: lwz r[[REG3:[0-9]+]], 108(r1)
; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
-; DARWIN32: stw r[[REG2]], -[[OFFSET1:[0-9]+]]
-; DARWIN32: stw r[[REG3]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r[[REG1]], -[[OFFSET1]]
-; DARWIN32: lwz r[[REG1]], -[[OFFSET2]]
+; DARWIN32-DAG: stw r[[REG2]], -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: stw r[[REG3]], -[[OFFSET2:[0-9]+]]
+; DARWIN32: lwz r3, -[[OFFSET1]]
+; DARWIN32: lwz r3, -[[OFFSET2]]
; DARWIN64: _func4:
; DARWIN64: ld r[[REG2:[0-9]+]], 120(r1)
-; DARWIN64: ld r[[REG3:[0-9]+]], 136(r1)
-; DARWIN64: mr r[[REG4:[0-9]+]], r[[REG2]]
+; DARWIN64-DAG: ld r[[REG3:[0-9]+]], 136(r1)
+; DARWIN64-DAG: mr r[[REG4:[0-9]+]], r[[REG2]]
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REG2]], r[[REG3]]
-; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG3]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: std r[[REG4]], -[[OFFSET1:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
; DARWIN64: ld r3, -[[OFFSET2]]
diff --git a/test/CodeGen/PowerPC/floatPSA.ll b/test/CodeGen/PowerPC/floatPSA.ll
index ccda9d56a147..73dea19adbd5 100644
--- a/test/CodeGen/PowerPC/floatPSA.ll
+++ b/test/CodeGen/PowerPC/floatPSA.ll
@@ -1,4 +1,4 @@
-; RUN: llc -verify-machineinstrs -O0 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -O2 -mtriple=powerpc64-unknown-linux-gnu -fast-isel=false < %s | FileCheck %s
; This verifies that single-precision floating point values that can't
; be passed in registers are stored in the rightmost word of the parameter
diff --git a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
index d398dfe7fc92..059665adc351 100644
--- a/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
+++ b/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
@@ -100,28 +100,26 @@ define signext i32 @zeroEqualityTest04() {
; CHECK-NEXT: addi 5, 4, .LzeroEqualityTest02.buffer2@toc@l
; CHECK-NEXT: ldbrx 3, 0, 6
; CHECK-NEXT: ldbrx 4, 0, 5
-; CHECK-NEXT: subf. 7, 4, 3
+; CHECK-NEXT: cmpld 3, 4
; CHECK-NEXT: bne 0, .LBB3_2
; CHECK-NEXT: # BB#1: # %loadbb1
; CHECK-NEXT: li 4, 8
; CHECK-NEXT: ldbrx 3, 6, 4
; CHECK-NEXT: ldbrx 4, 5, 4
-; CHECK-NEXT: subf. 5, 4, 3
-; CHECK-NEXT: beq 0, .LBB3_4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: beq 0, .LBB3_3
; CHECK-NEXT: .LBB3_2: # %res_block
; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: li 11, 1
; CHECK-NEXT: li 12, -1
-; CHECK-NEXT: isel 3, 12, 3, 0
+; CHECK-NEXT: isel 5, 12, 11, 0
; CHECK-NEXT: .LBB3_3: # %endblock
-; CHECK-NEXT: cmpwi 3, 1
+; CHECK-NEXT: cmpwi 5, 1
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: li 4, 1
; CHECK-NEXT: isel 3, 4, 3, 0
; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB3_4:
-; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: b .LBB3_3
%call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer2 to i8*), i64 16)
%not.cmp = icmp slt i32 %call, 1
%. = zext i1 %not.cmp to i32
@@ -138,27 +136,25 @@ define signext i32 @zeroEqualityTest05() {
; CHECK-NEXT: addi 5, 4, .LzeroEqualityTest03.buffer2@toc@l
; CHECK-NEXT: ldbrx 3, 0, 6
; CHECK-NEXT: ldbrx 4, 0, 5
-; CHECK-NEXT: subf. 7, 4, 3
+; CHECK-NEXT: cmpld 3, 4
; CHECK-NEXT: bne 0, .LBB4_2
; CHECK-NEXT: # BB#1: # %loadbb1
; CHECK-NEXT: li 4, 8
; CHECK-NEXT: ldbrx 3, 6, 4
; CHECK-NEXT: ldbrx 4, 5, 4
-; CHECK-NEXT: subf. 5, 4, 3
-; CHECK-NEXT: beq 0, .LBB4_4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: beq 0, .LBB4_3
; CHECK-NEXT: .LBB4_2: # %res_block
; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: li 11, 1
; CHECK-NEXT: li 12, -1
-; CHECK-NEXT: isel 3, 12, 3, 0
+; CHECK-NEXT: isel 5, 12, 11, 0
; CHECK-NEXT: .LBB4_3: # %endblock
-; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: srwi 3, 5, 31
; CHECK-NEXT: xori 3, 3, 1
; CHECK-NEXT: clrldi 3, 3, 32
; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB4_4:
-; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: b .LBB4_3
%call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer2 to i8*), i64 16)
%call.lobit = lshr i32 %call, 31
%call.lobit.not = xor i32 %call.lobit, 1
diff --git a/test/CodeGen/PowerPC/memcmp.ll b/test/CodeGen/PowerPC/memcmp.ll
index bae713cb2072..fbaaa8bb74c9 100644
--- a/test/CodeGen/PowerPC/memcmp.ll
+++ b/test/CodeGen/PowerPC/memcmp.ll
@@ -1,87 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-gnu-linux < %s | FileCheck %s -check-prefix=CHECK
-; Check size 8
-; Function Attrs: nounwind readonly
-define signext i32 @test1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 8) #2
+define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp8:
+; CHECK: # BB#0:
+; CHECK-NEXT: ldbrx 3, 0, 3
+; CHECK-NEXT: ldbrx 4, 0, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 8)
ret i32 %call
-
-; CHECK-LABEL: @test1
-; CHECK: ldbrx [[LOAD1:[0-9]+]]
-; CHECK-NEXT: ldbrx [[LOAD2:[0-9]+]]
-; CHECK-NEXT: li [[LI:[0-9]+]], 1
-; CHECK-NEXT: cmpld [[CMPLD:[0-9]+]], [[LOAD1]], [[LOAD2]]
-; CHECK-NEXT: subf. [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: li [[LI2:[0-9]+]], -1
-; CHECK-NEXT: isel [[ISEL:[0-9]+]], [[LI2]], [[LI]], 4
-; CHECK-NEXT: isel [[ISEL2:[0-9]+]], 0, [[ISEL]], 2
-; CHECK-NEXT: extsw 3, [[ISEL2]]
-; CHECK-NEXT: blr
}
-; Check size 4
-; Function Attrs: nounwind readonly
-define signext i32 @test2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 4) #2
+define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp4:
+; CHECK: # BB#0:
+; CHECK-NEXT: lwbrx 3, 0, 3
+; CHECK-NEXT: lwbrx 4, 0, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmplw 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 4)
ret i32 %call
-
-; CHECK-LABEL: @test2
-; CHECK: lwbrx [[LOAD1:[0-9]+]]
-; CHECK-NEXT: lwbrx [[LOAD2:[0-9]+]]
-; CHECK-NEXT: li [[LI:[0-9]+]], 1
-; CHECK-NEXT: cmpld [[CMPLD:[0-9]+]], [[LOAD1]], [[LOAD2]]
-; CHECK-NEXT: subf. [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: li [[LI2:[0-9]+]], -1
-; CHECK-NEXT: isel [[ISEL:[0-9]+]], [[LI2]], [[LI]], 4
-; CHECK-NEXT: isel [[ISEL2:[0-9]+]], 0, [[ISEL]], 2
-; CHECK-NEXT: extsw 3, [[ISEL2]]
-; CHECK-NEXT: blr
}
-; Check size 2
-; Function Attrs: nounwind readonly
-define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 2) #2
+define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp2:
+; CHECK: # BB#0:
+; CHECK-NEXT: lhbrx 3, 0, 3
+; CHECK-NEXT: lhbrx 4, 0, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmplw 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 2)
ret i32 %call
-
-; CHECK-LABEL: @test3
-; CHECK: lhbrx [[LOAD1:[0-9]+]]
-; CHECK-NEXT: lhbrx [[LOAD2:[0-9]+]]
-; CHECK-NEXT: li [[LI:[0-9]+]], 1
-; CHECK-NEXT: cmpld [[CMPLD:[0-9]+]], [[LOAD1]], [[LOAD2]]
-; CHECK-NEXT: subf. [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: li [[LI2:[0-9]+]], -1
-; CHECK-NEXT: isel [[ISEL:[0-9]+]], [[LI2]], [[LI]], 4
-; CHECK-NEXT: isel [[ISEL2:[0-9]+]], 0, [[ISEL]], 2
-; CHECK-NEXT: extsw 3, [[ISEL2]]
-; CHECK-NEXT: blr
}
-; Check size 1
-; Function Attrs: nounwind readonly
-define signext i32 @test4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) local_unnamed_addr #0 {
-entry:
- %0 = bitcast i32* %buffer1 to i8*
- %1 = bitcast i32* %buffer2 to i8*
- %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 1) #2
+define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp1:
+; CHECK: # BB#0:
+; CHECK-NEXT: lbz 3, 0(3)
+; CHECK-NEXT: lbz 4, 0(4)
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: li 12, -1
+; CHECK-NEXT: cmplw 3, 4
+; CHECK-NEXT: isel 3, 12, 5, 0
+; CHECK-NEXT: isel 3, 0, 3, 2
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 1) #2
ret i32 %call
-
-; CHECK-LABEL: @test4
-; CHECK: lbz [[LOAD1:[0-9]+]]
-; CHECK-NEXT: lbz [[LOAD2:[0-9]+]]
-; CHECK-NEXT: subf [[SUB:[0-9]+]], [[LOAD2]], [[LOAD1]]
-; CHECK-NEXT: extsw 3, [[SUB]]
-; CHECK-NEXT: blr
}
-; Function Attrs: nounwind readonly
-declare signext i32 @memcmp(i8*, i8*, i64) #1
+declare signext i32 @memcmp(i8*, i8*, i64)
diff --git a/test/CodeGen/PowerPC/memcmpIR.ll b/test/CodeGen/PowerPC/memcmpIR.ll
index f052cc258df8..55f48ad19a63 100644
--- a/test/CodeGen/PowerPC/memcmpIR.ll
+++ b/test/CodeGen/PowerPC/memcmpIR.ll
@@ -3,48 +3,47 @@
define signext i32 @test1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
entry:
+ ; CHECK-LABEL: @test1(
; CHECK: [[LOAD1:%[0-9]+]] = load i64, i64*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-LABEL: res_block:{{.*}}
; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
; CHECK-NEXT: br label %endblock
+ ; CHECK-LABEL: loadbb1:{{.*}}
; CHECK: [[GEP1:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-NEXT: [[GEP2:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[GEP1]]
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[GEP2]]
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
-
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
+ ; CHECK-BE-LABEL: @test1(
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i64, i64*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-BE-LABEL: res_block:{{.*}}
; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
; CHECK-BE-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
; CHECK-BE-NEXT: br label %endblock
+ ; CHECK-BE-LABEL: loadbb1:{{.*}}
; CHECK-BE: [[GEP1:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-BE-NEXT: [[GEP2:%[0-9]+]] = getelementptr i64, i64* {{.*}}, i64 1
; CHECK-BE-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[GEP1]]
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[GEP2]]
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
%0 = bitcast i32* %buffer1 to i8*
%1 = bitcast i32* %buffer2 to i8*
@@ -55,33 +54,25 @@ entry:
declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1
define signext i32 @test2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+ ; CHECK-LABEL: @test2(
; CHECK: [[LOAD1:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD2]])
- ; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[BSWAP1]] to i64
- ; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[BSWAP2]] to i64
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
-
- ; CHECK-LABEL: res_block:{{.*}}
- ; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
- ; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
- ; CHECK-NEXT: br label %endblock
+ ; CHECK-NEXT: [[CMP1:%[0-9]+]] = icmp ne i32 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: [[CMP2:%[0-9]+]] = icmp ult i32 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: [[SELECT1:%[0-9]+]] = select i1 [[CMP2]], i32 -1, i32 1
+ ; CHECK-NEXT: [[SELECT2:%[0-9]+]] = select i1 [[CMP1]], i32 [[SELECT1]], i32 0
+ ; CHECK-NEXT: ret i32 [[SELECT2]]
+ ; CHECK-BE-LABEL: @test2(
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i32, i32*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
- ; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[LOAD1]] to i64
- ; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[LOAD2]] to i64
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label %endblock
-
- ; CHECK-BE-LABEL: res_block:{{.*}}
- ; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
- ; CHECK-BE-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
- ; CHECK-BE-NEXT: br label %endblock
+ ; CHECK-BE-NEXT: [[CMP1:%[0-9]+]] = icmp ne i32 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: [[CMP2:%[0-9]+]] = icmp ult i32 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: [[SELECT1:%[0-9]+]] = select i1 [[CMP2]], i32 -1, i32 1
+ ; CHECK-BE-NEXT: [[SELECT2:%[0-9]+]] = select i1 [[CMP1]], i32 [[SELECT1]], i32 0
+ ; CHECK-BE-NEXT: ret i32 [[SELECT2]]
entry:
%0 = bitcast i32* %buffer1 to i8*
@@ -95,35 +86,35 @@ define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture reado
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[BSWAP1]], [[BSWAP2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-LABEL: res_block:{{.*}}
; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
; CHECK-NEXT: br label %endblock
+ ; CHECK-LABEL: loadbb1:{{.*}}
; CHECK: [[LOAD1:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD2]])
; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[BSWAP1]] to i64
; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[BSWAP2]] to i64
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb2, label %res_block
+ ; CHECK-LABEL: loadbb2:{{.*}}
; CHECK: [[LOAD1:%[0-9]+]] = load i16, i16*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i16, i16*
; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i16 @llvm.bswap.i16(i16 [[LOAD1]])
; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i16 @llvm.bswap.i16(i16 [[LOAD2]])
; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i16 [[BSWAP1]] to i64
; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i16 [[BSWAP2]] to i64
- ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb3, label %res_block
+ ; CHECK-LABEL: loadbb3:{{.*}}
; CHECK: [[LOAD1:%[0-9]+]] = load i8, i8*
; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i8, i8*
; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i8 [[LOAD1]] to i32
@@ -133,9 +124,8 @@ define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture reado
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i64, i64*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[LOAD1]], [[LOAD2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
; CHECK-BE-LABEL: res_block:{{.*}}
; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
@@ -146,17 +136,15 @@ define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture reado
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[LOAD1]] to i64
; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[LOAD2]] to i64
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb2, label %res_block
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i16, i16*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i16, i16*
; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i16 [[LOAD1]] to i64
; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i16 [[LOAD2]] to i64
- ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i64 [[ZEXT1]], [[ZEXT2]]
- ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp ne i64 [[SUB]], 0
- ; CHECK-BE-NEXT: br i1 [[ICMP]], label %res_block, label
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb3, label %res_block
; CHECK-BE: [[LOAD1:%[0-9]+]] = load i8, i8*
; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i8, i8*
diff --git a/test/CodeGen/PowerPC/merge_stores_dereferenceable.ll b/test/CodeGen/PowerPC/merge_stores_dereferenceable.ll
new file mode 100644
index 000000000000..29aee7a3825f
--- /dev/null
+++ b/test/CodeGen/PowerPC/merge_stores_dereferenceable.ll
@@ -0,0 +1,24 @@
+; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+
+; This code causes an assertion failure if the dereferenceable flag is not properly set when merging consecutive stores
+; CHECK-LABEL: func:
+; CHECK: lxvd2x [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+; CHECK-NOT: lxvd2x
+; CHECK: stxvd2x [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
+
+define <2 x i64> @func(i64* %pdst) {
+entry:
+ %a = alloca [4 x i64], align 8
+ %psrc0 = bitcast [4 x i64]* %a to i64*
+ %psrc1 = getelementptr inbounds i64, i64* %psrc0, i64 1
+ %d0 = load i64, i64* %psrc0
+ %d1 = load i64, i64* %psrc1
+ %pdst0 = getelementptr inbounds i64, i64* %pdst, i64 0
+ %pdst1 = getelementptr inbounds i64, i64* %pdst, i64 1
+ store i64 %d0, i64* %pdst0, align 8
+ store i64 %d1, i64* %pdst1, align 8
+ %psrcd = bitcast [4 x i64]* %a to <2 x i64>*
+ %vec = load <2 x i64>, <2 x i64>* %psrcd
+ ret <2 x i64> %vec
+}
+
diff --git a/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
index d59dc64dcf85..ba56dbaa83d0 100644
--- a/test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -1,6 +1,6 @@
-; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=-vsx < %s | FileCheck %s
-; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-VSX %s
-; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-P9 %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O2 -fast-isel=false -mattr=-vsx < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O2 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-VSX %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O2 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-P9 %s
; Verify internal alignment of long double in a struct. The double
; argument comes in in GPR3; GPR4 is skipped; GPRs 5 and 6 contain
@@ -44,9 +44,9 @@ entry:
; CHECK-VSX-DAG: std 3, 48(1)
; CHECK-VSX-DAG: std 5, -16(1)
; CHECK-VSX-DAG: std 6, -8(1)
-; CHECK-VSX: addi 3, 1, -16
-; CHECK-VSX: lxsdx 1, 0, 3
-; CHECK-VSX: addi 3, 1, -8
+; CHECK-VSX-DAG: addi [[REG1:[0-9]+]], 1, -16
+; CHECK-VSX-DAG: addi 3, 1, -8
+; CHECK-VSX: lxsdx 1, 0, [[REG1]]
; CHECK-VSX: lxsdx 2, 0, 3
; FIXME-VSX: addi 4, 1, 48
@@ -54,9 +54,9 @@ entry:
; FIXME-VSX: li 3, 24
; FIXME-VSX: lxsdx 2, 4, 3
-; CHECK-P9: std 6, 72(1)
-; CHECK-P9: std 5, 64(1)
-; CHECK-P9: std 4, 56(1)
-; CHECK-P9: std 3, 48(1)
-; CHECK-P9: mtvsrd 1, 5
-; CHECK-P9: mtvsrd 2, 6
+; CHECK-P9-DAG: std 6, 72(1)
+; CHECK-P9-DAG: std 5, 64(1)
+; CHECK-P9-DAG: std 4, 56(1)
+; CHECK-P9-DAG: std 3, 48(1)
+; CHECK-P9-DAG: mtvsrd 1, 5
+; CHECK-P9-DAG: mtvsrd 2, 6
diff --git a/test/CodeGen/PowerPC/tls.ll b/test/CodeGen/PowerPC/tls.ll
index 55df71b53761..63f498c1662c 100644
--- a/test/CodeGen/PowerPC/tls.ll
+++ b/test/CodeGen/PowerPC/tls.ll
@@ -11,8 +11,8 @@ target triple = "powerpc64-unknown-linux-gnu"
define i32 @localexec() nounwind {
entry:
;OPT0: addis [[REG1:[0-9]+]], 13, a@tprel@ha
-;OPT0-NEXT: li [[REG2:[0-9]+]], 42
;OPT0-NEXT: addi [[REG1]], [[REG1]], a@tprel@l
+;OPT0-NEXT: li [[REG2:[0-9]+]], 42
;OPT0: stw [[REG2]], 0([[REG1]])
;OPT1: addis [[REG1:[0-9]+]], 13, a@tprel@ha
;OPT1-NEXT: li [[REG2:[0-9]+]], 42
diff --git a/test/CodeGen/PowerPC/tls_get_addr_fence1.mir b/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
new file mode 100644
index 000000000000..fa8e73e321dd
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
@@ -0,0 +1,66 @@
+# ADJCALLSTACKDOWN and ADJCALLSTACKUP must be generated around TLS pseudo code as a scheduling fence (PR25839).
+# RUN: llc -mtriple=powerpc64le-linux-gnu -run-pass=ppc-tls-dynamic-call -verify-machineinstrs -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "e-m:e-i64:64-n32:64"
+ target triple = "powerpc64le-unknown-linux-gnu"
+
+ @tls_var = external thread_local local_unnamed_addr global i32
+
+ define i32 @tls_func() local_unnamed_addr {
+ entry:
+ %0 = load i32, i32* @tls_var
+ ret i32 %0
+ }
+
+...
+---
+name: tls_func
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 2, class: g8rc, preferred-register: '' }
+liveins:
+ - { reg: '%x2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %x2
+ %0 = ADDIStlsgdHA %x2, @tls_var
+ %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7
+ %2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var)
+ %x3 = COPY %2
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+ ; CHECK-LABEL: bb.0.entry
+ ; CHECK: %[[reg1:[0-9]+]] = ADDIStlsgdHA %x2, @tls_var
+ ; CHECK: ADJCALLSTACKDOWN 0, 0
+ ; CHECK: %x3 = ADDItlsgdL %[[reg1]], @tls_var
+ ; CHECK: %x3 = GETtlsADDR %x3, @tls_var
+ ; CHECK: ADJCALLSTACKUP 0, 0
+ ; CHECK: BLR8
+...
diff --git a/test/CodeGen/PowerPC/tls_get_addr_fence2.mir b/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
new file mode 100644
index 000000000000..2bb88147fcf4
--- /dev/null
+++ b/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
@@ -0,0 +1,65 @@
+# ADJCALLSTACKDOWN and ADJCALLSTACKUP should not be generated around TLS pseudo code if it is located within an existing ADJCALLSTACKDOWN/ADJCALLSTACKUP pair.
+# RUN: llc -mtriple=powerpc64le-linux-gnu -run-pass=ppc-tls-dynamic-call -verify-machineinstrs -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "e-m:e-i64:64-n32:64"
+ target triple = "powerpc64le-unknown-linux-gnu"
+
+ @tls_var = external thread_local local_unnamed_addr global i32
+
+ define i32 @tls_func() local_unnamed_addr {
+ entry:
+ %0 = load i32, i32* @tls_var
+ ret i32 %0
+ }
+
+...
+---
+name: tls_func
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
+ - { id: 2, class: g8rc, preferred-register: '' }
+liveins:
+ - { reg: '%x2' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 4294967295
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ savePoint: ''
+ restorePoint: ''
+fixedStack:
+stack:
+constants:
+body: |
+ bb.0.entry:
+ liveins: %x2
+ ADJCALLSTACKDOWN 32, 0, implicit-def %r1, implicit %r1
+ %0 = ADDIStlsgdHA %x2, @tls_var
+ %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7
+ %2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var)
+ %x3 = COPY %2
+ ADJCALLSTACKUP 32, 0, implicit-def %r1, implicit %r1
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+ ; CHECK-LABEL: bb.0.entry
+ ; CHECK-NOT: ADJCALLSTACKDOWN 0, 0
+ ; CHECK-NOT: ADJCALLSTACKUP 0, 0
+ ; CHECK: BLR8
+...
diff --git a/test/CodeGen/Thumb/long-setcc.ll b/test/CodeGen/Thumb/long-setcc.ll
index 3460edb96f0d..7db06d0ae35e 100644
--- a/test/CodeGen/Thumb/long-setcc.ll
+++ b/test/CodeGen/Thumb/long-setcc.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi < %s | FileCheck %s
define i1 @t1(i64 %x) {
%B = icmp slt i64 %x, 0
diff --git a/test/CodeGen/Thumb2/constant-islands-new-island.ll b/test/CodeGen/Thumb2/constant-islands-new-island.ll
index 8ed657ef1f2a..de7b0cce3792 100644
--- a/test/CodeGen/Thumb2/constant-islands-new-island.ll
+++ b/test/CodeGen/Thumb2/constant-islands-new-island.ll
@@ -1,25 +1,25 @@
; RUN: llc < %s -mtriple=thumbv7-linux-gnueabihf %s -o - | FileCheck %s
-; Check that new water is created by splitting the basic block right after the
+; Check that new water is created by splitting the basic block after the
; load instruction. Previously, new water was created before the load
; instruction, which caused the pass to fail to converge.
define void @test(i1 %tst) {
; CHECK-LABEL: test:
; CHECK: vldr {{s[0-9]+}}, [[CONST:\.LCPI[0-9]+_[0-9]+]]
-; CHECK-NEXT: b.w [[CONTINUE:\.LBB[0-9]+_[0-9]+]]
+; CHECK: b.w [[CONTINUE:\.LBB[0-9]+_[0-9]+]]
; CHECK: [[CONST]]:
; CHECK-NEXT: .long
; CHECK: [[CONTINUE]]:
entry:
- call i32 @llvm.arm.space(i32 2000, i32 undef)
br i1 %tst, label %true, label %false
true:
%val = phi float [12345.0, %entry], [undef, %false]
+ call i32 @llvm.arm.space(i32 2000, i32 undef)
call void @bar(float %val)
ret void
diff --git a/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll b/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
new file mode 100644
index 000000000000..9fcc0f5d617b
--- /dev/null
+++ b/test/CodeGen/Thumb2/ifcvt-no-branch-predictor.ll
@@ -0,0 +1,154 @@
+; RUN: llc < %s -mtriple=thumbv7m -mcpu=cortex-m7 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BP
+; RUN: llc < %s -mtriple=thumbv7m -mcpu=cortex-m3 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-NOBP
+
+declare void @otherfn()
+
+; CHECK-LABEL: triangle1:
+; CHECK: itt ne
+; CHECK: movne
+; CHECK: strne
+define i32 @triangle1(i32 %n, i32* %p) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ br label %if.end
+
+if.end:
+ tail call void @otherfn()
+ ret i32 0
+}
+
+; CHECK-LABEL: triangle2:
+; CHECK-BP: itttt ne
+; CHECK-BP: movne
+; CHECK-BP: strne
+; CHECK-BP: movne
+; CHECK-BP: strne
+; CHECK-NOBP: cbz
+; CHECK-NOBP: movs
+; CHECK-NOBP: str
+; CHECK-NOBP: movs
+; CHECK-NOBP: str
+define i32 @triangle2(i32 %n, i32* %p, i32* %q) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ store i32 2, i32* %q, align 4
+ br label %if.end
+
+if.end:
+ tail call void @otherfn()
+ ret i32 0
+}
+
+; CHECK-LABEL: triangle3:
+; CHECK: cbz
+; CHECK: movs
+; CHECK: str
+; CHECK: movs
+; CHECK: str
+; CHECK: movs
+; CHECK: str
+define i32 @triangle3(i32 %n, i32* %p, i32* %q, i32* %r) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ store i32 2, i32* %q, align 4
+ store i32 3, i32* %r, align 4
+ br label %if.end
+
+if.end:
+ tail call void @otherfn()
+ ret i32 0
+}
+
+; CHECK-LABEL: diamond1:
+; CHECK: ite eq
+; CHECK: ldreq
+; CHECK: strne
+define i32 @diamond1(i32 %n, i32* %p) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ store i32 %n, i32* %p, align 4
+ br label %if.end
+
+if.else:
+ %0 = load i32, i32* %p, align 4
+ br label %if.end
+
+if.end:
+ %n.addr.0 = phi i32 [ %n, %if.then ], [ %0, %if.else ]
+ tail call void @otherfn()
+ ret i32 %n.addr.0
+}
+
+; CHECK-LABEL: diamond2:
+; CHECK-BP: itte
+; CHECK-BP: streq
+; CHECK-BP: ldreq
+; CHECK-BP: strne
+; CHECK-NOBP: cbz
+; CHECK-NOBP: str
+; CHECK-NOBP: b
+; CHECK-NOBP: str
+; CHECK-NOBP: ldr
+define i32 @diamond2(i32 %n, i32 %m, i32* %p, i32* %q) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ store i32 %n, i32* %p, align 4
+ br label %if.end
+
+if.else:
+ store i32 %m, i32* %q, align 4
+ %0 = load i32, i32* %p, align 4
+ br label %if.end
+
+if.end:
+ %n.addr.0 = phi i32 [ %n, %if.then ], [ %0, %if.else ]
+ tail call void @otherfn()
+ ret i32 %n.addr.0
+}
+
+; CHECK-LABEL: diamond3:
+; CHECK: cbz
+; CHECK: movs
+; CHECK: str
+; CHECK: b
+; CHECK: ldr
+; CHECK: ldr
+; CHECK: adds
+define i32 @diamond3(i32 %n, i32* %p, i32* %q) {
+entry:
+ %tobool = icmp eq i32 %n, 0
+ br i1 %tobool, label %if.else, label %if.then
+
+if.then:
+ store i32 1, i32* %p, align 4
+ br label %if.end
+
+if.else:
+ %0 = load i32, i32* %p, align 4
+ %1 = load i32, i32* %q, align 4
+ %add = add nsw i32 %1, %0
+ br label %if.end
+
+if.end:
+ %n.addr.0 = phi i32 [ %n, %if.then ], [ %add, %if.else ]
+ tail call void @otherfn()
+ ret i32 %n.addr.0
+}
diff --git a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
index 4a76e100b658..3c74dde11148 100644
--- a/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
+++ b/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
@@ -35,9 +35,6 @@ entry:
; CHECK: cmp
; CHECK: it eq
; CHECK: cmpeq
-; CHECK: itt eq
-; CHECK: moveq
-; CHECK: popeq
br label %tailrecurse
tailrecurse: ; preds = %bb, %entry
diff --git a/test/CodeGen/WebAssembly/exception.ll b/test/CodeGen/WebAssembly/exception.ll
new file mode 100644
index 000000000000..eedb5c78b241
--- /dev/null
+++ b/test/CodeGen/WebAssembly/exception.ll
@@ -0,0 +1,22 @@
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+declare void @llvm.wasm.throw(i32, i8*)
+declare void @llvm.wasm.rethrow()
+
+; CHECK-LABEL: throw:
+; CHECK-NEXT: i32.const $push0=, 0
+; CHECK-NEXT: throw 0, $pop0
+define void @throw() {
+ call void @llvm.wasm.throw(i32 0, i8* null)
+ ret void
+}
+
+; CHECK-LABEL: rethrow:
+; CHECK-NEXT: rethrow 0
+define void @rethrow() {
+ call void @llvm.wasm.rethrow()
+ ret void
+}
diff --git a/test/CodeGen/X86/GlobalISel/and-scalar.ll b/test/CodeGen/X86/GlobalISel/and-scalar.ll
new file mode 100644
index 000000000000..b19321421087
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/and-scalar.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
+
+define i8 @test_and_i8(i8 %arg1, i8 %arg2) {
+; ALL-LABEL: test_and_i8:
+; ALL: # BB#0:
+; ALL-NEXT: andb %dil, %sil
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = and i8 %arg1, %arg2
+ ret i8 %ret
+}
+
+define i16 @test_and_i16(i16 %arg1, i16 %arg2) {
+; ALL-LABEL: test_and_i16:
+; ALL: # BB#0:
+; ALL-NEXT: andw %di, %si
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = and i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_and_i32:
+; ALL: # BB#0:
+; ALL-NEXT: andl %edi, %esi
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = and i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i64 @test_and_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_and_i64:
+; ALL: # BB#0:
+; ALL-NEXT: andq %rdi, %rsi
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ %ret = and i64 %arg1, %arg2
+ ret i64 %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/fadd-scalar.ll b/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
new file mode 100644
index 000000000000..6aee06a75f6a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fadd_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fadd_float:
+; ALL: # BB#0:
+; ALL-NEXT: addss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fadd_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fadd_double:
+; ALL: # BB#0:
+; ALL-NEXT: addsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll b/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
new file mode 100644
index 000000000000..268802dc06aa
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fdiv_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fdiv_float:
+; ALL: # BB#0:
+; ALL-NEXT: divss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fdiv float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fdiv_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fdiv_double:
+; ALL: # BB#0:
+; ALL-NEXT: divsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fdiv double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/fmul-scalar.ll b/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
new file mode 100644
index 000000000000..c7a37a14c33c
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fmul_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fmul_float:
+; ALL: # BB#0:
+; ALL-NEXT: mulss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fmul float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fmul_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fmul_double:
+; ALL: # BB#0:
+; ALL-NEXT: mulsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fmul double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/fsub-scalar.ll b/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
new file mode 100644
index 000000000000..32c25a3a0822
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+define float @test_fsub_float(float %arg1, float %arg2) {
+; ALL-LABEL: test_fsub_float:
+; ALL: # BB#0:
+; ALL-NEXT: subss %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+}
+
+define double @test_fsub_double(double %arg1, double %arg2) {
+; ALL-LABEL: test_fsub_double:
+; ALL: # BB#0:
+; ALL-NEXT: subsd %xmm1, %xmm0
+; ALL-NEXT: retq
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
new file mode 100644
index 000000000000..b57db15d4646
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
@@ -0,0 +1,124 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i8 @test_and_i8() {
+ %ret = and i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_and_i16() {
+ %ret = and i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_and_i32() {
+ %ret = and i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_and_i64() {
+ %ret = and i64 undef, undef
+ ret i64 %ret
+ }
+
+...
+---
+name: test_and_i8
+# CHECK-LABEL: name: test_and_i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s8) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s8) = G_AND %0, %0
+# CHECK-NEXT: %al = COPY %1(s8)
+# CHECK-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_AND %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_and_i16
+# CHECK-LABEL: name: test_and_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s16) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s16) = G_AND %0, %0
+# CHECK-NEXT: %ax = COPY %1(s16)
+# CHECK-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_AND %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_and_i32
+# CHECK-LABEL: name: test_and_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s32) = G_AND %0, %0
+# CHECK-NEXT: %eax = COPY %1(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_AND %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_and_i64
+# CHECK-LABEL: name: test_and_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s64) = G_AND %0, %0
+# CHECK-NEXT: %rax = COPY %1(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_AND %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
new file mode 100644
index 000000000000..353a26ca2c8a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fadd_float(float %arg1, float %arg2) {
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fadd_double(double %arg1, double %arg2) {
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fadd_float
+# CHECK-LABEL: name: test_fadd_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FADD %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FADD %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fadd_double
+# CHECK-LABEL: name: test_fadd_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FADD %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FADD %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
new file mode 100644
index 000000000000..102d95c6390c
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fdiv_float(float %arg1, float %arg2) {
+ %ret = fdiv float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fdiv_double(double %arg1, double %arg2) {
+ %ret = fdiv double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fdiv_float
+# CHECK-LABEL: name: test_fdiv_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FDIV %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fdiv_double
+# CHECK-LABEL: name: test_fdiv_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FDIV %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
new file mode 100644
index 000000000000..eeacbfcf07b2
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fmul_float(float %arg1, float %arg2) {
+ %ret = fmul float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fmul_double(double %arg1, double %arg2) {
+ %ret = fmul double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fmul_float
+# CHECK-LABEL: name: test_fmul_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FMUL %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fmul_double
+# CHECK-LABEL: name: test_fmul_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FMUL %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
new file mode 100644
index 000000000000..3b3ee4aa0afb
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define float @test_fsub_float(float %arg1, float %arg2) {
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fsub_double(double %arg1, double %arg2) {
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fsub_float
+# CHECK-LABEL: name: test_fsub_float
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = COPY %xmm0
+# CHECK-NEXT: %1(s32) = COPY %xmm1
+# CHECK-NEXT: %2(s32) = G_FSUB %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s32)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fsub_double
+# CHECK-LABEL: name: test_fsub_double
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = COPY %xmm0
+# CHECK-NEXT: %1(s64) = COPY %xmm1
+# CHECK-NEXT: %2(s64) = G_FSUB %0, %1
+# CHECK-NEXT: %xmm0 = COPY %2(s64)
+# CHECK-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
new file mode 100644
index 000000000000..a014f56a3588
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
@@ -0,0 +1,124 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i8 @test_or_i8() {
+ %ret = or i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_or_i16() {
+ %ret = or i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_or_i32() {
+ %ret = or i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_or_i64() {
+ %ret = or i64 undef, undef
+ ret i64 %ret
+ }
+
+...
+---
+name: test_or_i8
+# CHECK-LABEL: name: test_or_i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s8) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s8) = G_OR %0, %0
+# CHECK-NEXT: %al = COPY %1(s8)
+# CHECK-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_OR %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_or_i16
+# CHECK-LABEL: name: test_or_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s16) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s16) = G_OR %0, %0
+# CHECK-NEXT: %ax = COPY %1(s16)
+# CHECK-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_OR %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_or_i32
+# CHECK-LABEL: name: test_or_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s32) = G_OR %0, %0
+# CHECK-NEXT: %eax = COPY %1(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_OR %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_or_i64
+# CHECK-LABEL: name: test_or_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s64) = G_OR %0, %0
+# CHECK-NEXT: %rax = COPY %1(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_OR %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir b/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
new file mode 100644
index 000000000000..e2af91283026
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
@@ -0,0 +1,124 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=legalizer %s -o - | FileCheck %s
+
+--- |
+ define i8 @test_xor_i8() {
+ %ret = xor i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_xor_i16() {
+ %ret = xor i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_xor_i32() {
+ %ret = xor i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_xor_i64() {
+ %ret = xor i64 undef, undef
+ ret i64 %ret
+ }
+
+...
+---
+name: test_xor_i8
+# CHECK-LABEL: name: test_xor_i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s8) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s8) = G_XOR %0, %0
+# CHECK-NEXT: %al = COPY %1(s8)
+# CHECK-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_XOR %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_xor_i16
+# CHECK-LABEL: name: test_xor_i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s16) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s16) = G_XOR %0, %0
+# CHECK-NEXT: %ax = COPY %1(s16)
+# CHECK-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_XOR %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_xor_i32
+# CHECK-LABEL: name: test_xor_i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s32) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s32) = G_XOR %0, %0
+# CHECK-NEXT: %eax = COPY %1(s32)
+# CHECK-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_XOR %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_xor_i64
+# CHECK-LABEL: name: test_xor_i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# CHECK: %0(s64) = IMPLICIT_DEF
+# CHECK-NEXT: %1(s64) = G_XOR %0, %0
+# CHECK-NEXT: %rax = COPY %1(s64)
+# CHECK-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_XOR %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/or-scalar.ll b/test/CodeGen/X86/GlobalISel/or-scalar.ll
new file mode 100644
index 000000000000..b0371457f76e
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/or-scalar.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
+
+define i8 @test_or_i8(i8 %arg1, i8 %arg2) {
+; ALL-LABEL: test_or_i8:
+; ALL: # BB#0:
+; ALL-NEXT: orb %dil, %sil
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = or i8 %arg1, %arg2
+ ret i8 %ret
+}
+
+define i16 @test_or_i16(i16 %arg1, i16 %arg2) {
+; ALL-LABEL: test_or_i16:
+; ALL: # BB#0:
+; ALL-NEXT: orw %di, %si
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = or i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i32 @test_or_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_or_i32:
+; ALL: # BB#0:
+; ALL-NEXT: orl %edi, %esi
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = or i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i64 @test_or_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_or_i64:
+; ALL: # BB#0:
+; ALL-NEXT: orq %rdi, %rsi
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ %ret = or i64 %arg1, %arg2
+ ret i64 %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index 7bcc57aef4ac..3658bc9af957 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -35,6 +35,25 @@
%ret = fadd double %arg1, %arg2
ret double %ret
}
+
+ define void @test_fsub_float() {
+ %ret1 = fsub float undef, undef
+ %ret2 = fsub double undef, undef
+ ret void
+ }
+
+ define void @test_fmul_float() {
+ %ret1 = fmul float undef, undef
+ %ret2 = fmul double undef, undef
+ ret void
+ }
+
+ define void @test_fdiv_float() {
+ %ret1 = fdiv float undef, undef
+ %ret2 = fdiv double undef, undef
+ ret void
+ }
+
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
%ret = add <4 x i32> %arg1, %arg2
@@ -135,6 +154,26 @@
ret i1 %r
}
+ define i8 @test_xor_i8() {
+ %ret = xor i8 undef, undef
+ ret i8 %ret
+ }
+
+ define i16 @test_or_i16() {
+ %ret = or i16 undef, undef
+ ret i16 %ret
+ }
+
+ define i32 @test_and_i32() {
+ %ret = and i32 undef, undef
+ ret i32 %ret
+ }
+
+ define i64 @test_and_i64() {
+ %ret = and i64 undef, undef
+ ret i64 %ret
+ }
+
...
---
name: test_add_i8
@@ -338,6 +377,105 @@ body: |
...
---
+name: test_fsub_float
+# CHECK-LABEL: name: test_fsub_float
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 3, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 4, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 5, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 6, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 7, class: vecr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %2(s64) = IMPLICIT_DEF
+ %1(s32) = G_FSUB %0, %0
+ %3(s64) = G_FSUB %2, %2
+ RET 0
+
+...
+---
+name: test_fmul_float
+# CHECK-LABEL: name: test_fmul_float
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 3, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 4, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 5, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 6, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 7, class: vecr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %2(s64) = IMPLICIT_DEF
+ %1(s32) = G_FMUL %0, %0
+ %3(s64) = G_FMUL %2, %2
+ RET 0
+
+...
+---
+name: test_fdiv_float
+# CHECK-LABEL: name: test_fdiv_float
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 2, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 3, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 4, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 5, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 6, class: vecr, preferred-register: '' }
+# CHECK-NEXT: - { id: 7, class: vecr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+ - { id: 2, class: _, preferred-register: '' }
+ - { id: 3, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %2(s64) = IMPLICIT_DEF
+ %1(s32) = G_FDIV %0, %0
+ %3(s64) = G_FDIV %2, %2
+ RET 0
+
+...
+---
name: test_add_v4i32
alignment: 4
legalized: true
@@ -850,3 +988,100 @@ body: |
RET 0, implicit %al
...
+---
+name: test_xor_i8
+# CHECK-LABEL: name: test_xor_i8
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s8) = IMPLICIT_DEF
+ %1(s8) = G_XOR %0, %0
+ %al = COPY %1(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_or_i16
+# CHECK-LABEL: name: test_or_i16
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s16) = IMPLICIT_DEF
+ %1(s16) = G_OR %0, %0
+ %ax = COPY %1(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_and_i32
+# CHECK-LABEL: name: test_and_i32
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = IMPLICIT_DEF
+ %1(s32) = G_AND %0, %0
+ %eax = COPY %1(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_and_i64
+# CHECK-LABEL: name: test_and_i64
+alignment: 4
+legalized: true
+regBankSelected: false
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gpr, preferred-register: '' }
+# CHECK-NEXT: - { id: 1, class: gpr, preferred-register: '' }
+registers:
+ - { id: 0, class: _, preferred-register: '' }
+ - { id: 1, class: _, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+body: |
+ bb.1 (%ir-block.0):
+ %0(s64) = IMPLICIT_DEF
+ %1(s64) = G_AND %0, %0
+ %rax = COPY %1(s64)
+ RET 0, implicit %rax
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/select-add.mir b/test/CodeGen/X86/GlobalISel/select-add.mir
index 78e6bb6913a4..45811c5cdc26 100644
--- a/test/CodeGen/X86/GlobalISel/select-add.mir
+++ b/test/CodeGen/X86/GlobalISel/select-add.mir
@@ -24,16 +24,6 @@
ret i8 %ret
}
- define float @test_add_float(float %arg1, float %arg2) {
- %ret = fadd float %arg1, %arg2
- ret float %ret
- }
-
- define double @test_add_double(double %arg1, double %arg2) {
- %ret = fadd double %arg1, %arg2
- ret double %ret
- }
-
define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
%ret = add <4 x i32> %arg1, %arg2
ret <4 x i32> %ret
@@ -157,76 +147,6 @@ body: |
...
---
-name: test_add_float
-# ALL-LABEL: name: test_add_float
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = ADDSSrr %0, %1
-# AVX-NEXT: %2 = VADDSSrr %0, %1
-# AVX512F-NEXT: %2 = VADDSSZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
- %2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
-
-...
----
-name: test_add_double
-# ALL-LABEL: name: test_add_double
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = ADDSDrr %0, %1
-# AVX-NEXT: %2 = VADDSDrr %0, %1
-# AVX512F-NEXT: %2 = VADDSDZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
- %2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
-
-...
----
name: test_add_v4i32
# ALL-LABEL: name: test_add_v4i32
alignment: 4
diff --git a/test/CodeGen/X86/GlobalISel/select-and-scalar.mir b/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
new file mode 100644
index 000000000000..c40cc224d50e
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
@@ -0,0 +1,160 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define i8 @test_and_i8(i8 %arg1, i8 %arg2) {
+ %ret = and i8 %arg1, %arg2
+ ret i8 %ret
+ }
+
+ define i16 @test_and_i16(i16 %arg1, i16 %arg2) {
+ %ret = and i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_and_i32(i32 %arg1, i32 %arg2) {
+ %ret = and i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_and_i64(i64 %arg1, i64 %arg2) {
+ %ret = and i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_and_i8
+# ALL-LABEL: name: test_and_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr8, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = COPY %sil
+# ALL-NEXT: %2 = AND8rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %al = COPY %2
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s8) = G_AND %0, %1
+ %al = COPY %2(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_and_i16
+# ALL-LABEL: name: test_and_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %di
+# ALL-NEXT: %1 = COPY %si
+# ALL-NEXT: %2 = AND16rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %2
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_AND %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_and_i32
+# ALL-LABEL: name: test_and_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = AND32rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %2
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_AND %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_and_i64
+# ALL-LABEL: name: test_and_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr64, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = AND64rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_AND %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-constant.mir b/test/CodeGen/X86/GlobalISel/select-constant.mir
index 7902a5084ce6..4b91b5f9f098 100644
--- a/test/CodeGen/X86/GlobalISel/select-constant.mir
+++ b/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -13,6 +13,10 @@
ret i32 4
}
+ define i32 @const_i32_0() {
+ ret i32 0
+ }
+
define i64 @const_i64() {
ret i64 68719476720
}
@@ -84,6 +88,23 @@ body: |
...
---
+name: const_i32_0
+# CHECK-LABEL: name: const_i32_0
+legalized: true
+regBankSelected: true
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr }
+# CHECK: %0 = MOV32r0 implicit-def %eflags
+body: |
+ bb.1 (%ir-block.0):
+ %0(s32) = G_CONSTANT i32 0
+ %eax = COPY %0(s32)
+ RET 0, implicit %eax
+
+...
+---
name: const_i64
legalized: true
regBankSelected: true
diff --git a/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
new file mode 100644
index 000000000000..fa4c529982cc
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fadd_float(float %arg1, float %arg2) {
+ %ret = fadd float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fadd_double(double %arg1, double %arg2) {
+ %ret = fadd double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fadd_float
+# ALL-LABEL: name: test_fadd_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = ADDSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VADDSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VADDSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FADD %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fadd_double
+# ALL-LABEL: name: test_fadd_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = ADDSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VADDSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VADDSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FADD %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
new file mode 100644
index 000000000000..d2c1d1528652
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fdiv_float(float %arg1, float %arg2) {
+ %ret = fdiv float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fdiv_double(double %arg1, double %arg2) {
+ %ret = fdiv double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fdiv_float
+# ALL-LABEL: name: test_fdiv_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = DIVSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VDIVSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VDIVSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fdiv_double
+# ALL-LABEL: name: test_fdiv_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = DIVSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VDIVSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VDIVSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FDIV %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
new file mode 100644
index 000000000000..98e5d303d7b1
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fmul_float(float %arg1, float %arg2) {
+ %ret = fmul float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fmul_double(double %arg1, double %arg2) {
+ %ret = fmul double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fmul_float
+# ALL-LABEL: name: test_fmul_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = MULSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VMULSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VMULSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fmul_double
+# ALL-LABEL: name: test_fmul_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = MULSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VMULSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VMULSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FMUL %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir b/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
new file mode 100644
index 000000000000..9f58327d9bb6
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=SSE
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=NO_AVX512F --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NO_AVX512VL --check-prefix=AVX512ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512ALL --check-prefix=AVX512VL
+--- |
+
+ define float @test_fsub_float(float %arg1, float %arg2) {
+ %ret = fsub float %arg1, %arg2
+ ret float %ret
+ }
+
+ define double @test_fsub_double(double %arg1, double %arg2) {
+ %ret = fsub double %arg1, %arg2
+ ret double %ret
+ }
+
+...
+---
+name: test_fsub_float
+# ALL-LABEL: name: test_fsub_float
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = SUBSSrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VSUBSSrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VSUBSSZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s32) = COPY %xmm0
+ %1(s32) = COPY %xmm1
+ %2(s32) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s32)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_fsub_double
+# ALL-LABEL: name: test_fsub_double
+alignment: 4
+legalized: true
+regBankSelected: true
+# NO_AVX512F: registers:
+# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
+# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
+#
+# AVX512ALL: registers:
+# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
+# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr, preferred-register: '' }
+ - { id: 1, class: vecr, preferred-register: '' }
+ - { id: 2, class: vecr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# SSE: %0 = COPY %xmm0
+# SSE-NEXT: %1 = COPY %xmm1
+# SSE-NEXT: %2 = SUBSDrr %0, %1
+# SSE-NEXT: %xmm0 = COPY %2
+# SSE-NEXT: RET 0, implicit %xmm0
+#
+# AVX: %0 = COPY %xmm0
+# AVX-NEXT: %1 = COPY %xmm1
+# AVX-NEXT: %2 = VSUBSDrr %0, %1
+# AVX-NEXT: %xmm0 = COPY %2
+# AVX-NEXT: RET 0, implicit %xmm0
+#
+# AVX512ALL: %0 = COPY %xmm0
+# AVX512ALL-NEXT: %1 = COPY %xmm1
+# AVX512ALL-NEXT: %2 = VSUBSDZrr %0, %1
+# AVX512ALL-NEXT: %xmm0 = COPY %2
+# AVX512ALL-NEXT: RET 0, implicit %xmm0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(s64) = COPY %xmm0
+ %1(s64) = COPY %xmm1
+ %2(s64) = G_FSUB %0, %1
+ %xmm0 = COPY %2(s64)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir b/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
new file mode 100644
index 000000000000..8e31a904e360
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
@@ -0,0 +1,52 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
+--- |
+ define void @test_merge() {
+ ret void
+ }
+...
+---
+name: test_merge
+# AVX-LABEL: name: test_merge
+#
+# AVX512VL-LABEL: name: test_merge
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX: registers:
+# AVX-NEXT: - { id: 0, class: vr128, preferred-register: '' }
+# AVX-NEXT: - { id: 1, class: vr256, preferred-register: '' }
+# AVX-NEXT: - { id: 2, class: vr256, preferred-register: '' }
+# AVX-NEXT: - { id: 3, class: vr256, preferred-register: '' }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 1, class: vr256x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 2, class: vr256x, preferred-register: '' }
+# AVX512VL-NEXT: - { id: 3, class: vr256x, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+# AVX: %0 = IMPLICIT_DEF
+# AVX-NEXT: undef %2.sub_xmm = COPY %0
+# AVX-NEXT: %3 = VINSERTF128rr %2, %0, 1
+# AVX-NEXT: %1 = COPY %3
+# AVX-NEXT: %ymm0 = COPY %1
+# AVX-NEXT: RET 0, implicit %ymm0
+#
+# AVX512VL: %0 = IMPLICIT_DEF
+# AVX512VL-NEXT: undef %2.sub_xmm = COPY %0
+# AVX512VL-NEXT: %3 = VINSERTF32x4Z256rr %2, %0, 1
+# AVX512VL-NEXT: %1 = COPY %3
+# AVX512VL-NEXT: %ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit %ymm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<4 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>)
+ %ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir b/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
new file mode 100644
index 000000000000..a072d582e505
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
@@ -0,0 +1,74 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+--- |
+ define void @test_merge_v128() {
+ ret void
+ }
+
+ define void @test_merge_v256() {
+ ret void
+ }
+
+...
+---
+name: test_merge_v128
+# ALL-LABEL: name: test_merge_v128
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr128x, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 4, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 5, class: vr512, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+# ALL: %0 = IMPLICIT_DEF
+# ALL-NEXT: undef %2.sub_xmm = COPY %0
+# ALL-NEXT: %3 = VINSERTF32x4Zrr %2, %0, 1
+# ALL-NEXT: %4 = VINSERTF32x4Zrr %3, %0, 2
+# ALL-NEXT: %5 = VINSERTF32x4Zrr %4, %0, 3
+# ALL-NEXT: %1 = COPY %5
+# ALL-NEXT: %zmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit %zmm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<4 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
+ %zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_merge_v256
+# ALL-LABEL: name: test_merge_v256
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr256x, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: vr512, preferred-register: '' }
+# ALL-NEXT: - { id: 3, class: vr512, preferred-register: '' }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+# ALL: %0 = IMPLICIT_DEF
+# ALL-NEXT: undef %2.sub_ymm = COPY %0
+# ALL-NEXT: %3 = VINSERTF64x4Zrr %2, %0, 1
+# ALL-NEXT: %1 = COPY %3
+# ALL-NEXT: %zmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit %zmm0
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = G_MERGE_VALUES %0(<8 x s32>), %0(<8 x s32>)
+ %zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+
diff --git a/test/CodeGen/X86/GlobalISel/select-or-scalar.mir b/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
new file mode 100644
index 000000000000..4f7e48207838
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
@@ -0,0 +1,160 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define i8 @test_or_i8(i8 %arg1, i8 %arg2) {
+ %ret = or i8 %arg1, %arg2
+ ret i8 %ret
+ }
+
+ define i16 @test_or_i16(i16 %arg1, i16 %arg2) {
+ %ret = or i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_or_i32(i32 %arg1, i32 %arg2) {
+ %ret = or i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_or_i64(i64 %arg1, i64 %arg2) {
+ %ret = or i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_or_i8
+# ALL-LABEL: name: test_or_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr8, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = COPY %sil
+# ALL-NEXT: %2 = OR8rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %al = COPY %2
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s8) = G_OR %0, %1
+ %al = COPY %2(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_or_i16
+# ALL-LABEL: name: test_or_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %di
+# ALL-NEXT: %1 = COPY %si
+# ALL-NEXT: %2 = OR16rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %2
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_OR %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_or_i32
+# ALL-LABEL: name: test_or_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = OR32rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %2
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_OR %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_or_i64
+# ALL-LABEL: name: test_or_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr64, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = OR64rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_OR %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-sub.mir b/test/CodeGen/X86/GlobalISel/select-sub.mir
index 4768a2d93222..d47f77828c9b 100644
--- a/test/CodeGen/X86/GlobalISel/select-sub.mir
+++ b/test/CodeGen/X86/GlobalISel/select-sub.mir
@@ -14,16 +14,6 @@
ret i32 %ret
}
- define float @test_sub_float(float %arg1, float %arg2) {
- %ret = fsub float %arg1, %arg2
- ret float %ret
- }
-
- define double @test_sub_double(double %arg1, double %arg2) {
- %ret = fsub double %arg1, %arg2
- ret double %ret
- }
-
define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
%ret = sub <4 x i32> %arg1, %arg2
ret <4 x i32> %ret
@@ -87,73 +77,6 @@ body: |
...
---
-name: test_sub_float
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr32, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr32, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr32x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr32x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = SUBSSrr %0, %1
-# AVX-NEXT: %2 = VSUBSSrr %0, %1
-# AVX512F-NEXT: %2 = VSUBSSZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
- %2(s32) = G_FSUB %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
-
-...
----
-name: test_sub_double
-alignment: 4
-legalized: true
-regBankSelected: true
-selected: false
-tracksRegLiveness: true
-# ALL: registers:
-# NO_AVX512F-NEXT: - { id: 0, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 1, class: fr64, preferred-register: '' }
-# NO_AVX512F-NEXT: - { id: 2, class: fr64, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 0, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 1, class: fr64x, preferred-register: '' }
-# AVX512ALL-NEXT: - { id: 2, class: fr64x, preferred-register: '' }
-registers:
- - { id: 0, class: vecr }
- - { id: 1, class: vecr }
- - { id: 2, class: vecr }
-# ALL: %0 = COPY %xmm0
-# ALL-NEXT: %1 = COPY %xmm1
-# SSE-NEXT: %2 = SUBSDrr %0, %1
-# AVX-NEXT: %2 = VSUBSDrr %0, %1
-# AVX512F-NEXT: %2 = VSUBSDZrr %0, %1
-body: |
- bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
-
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
- %2(s64) = G_FSUB %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
-...
----
name: test_sub_v4i32
alignment: 4
legalized: true
diff --git a/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir b/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
new file mode 100644
index 000000000000..9d03c6a3f1a8
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
@@ -0,0 +1,160 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define i8 @test_xor_i8(i8 %arg1, i8 %arg2) {
+ %ret = xor i8 %arg1, %arg2
+ ret i8 %ret
+ }
+
+ define i16 @test_xor_i16(i16 %arg1, i16 %arg2) {
+ %ret = xor i16 %arg1, %arg2
+ ret i16 %ret
+ }
+
+ define i32 @test_xor_i32(i32 %arg1, i32 %arg2) {
+ %ret = xor i32 %arg1, %arg2
+ ret i32 %ret
+ }
+
+ define i64 @test_xor_i64(i64 %arg1, i64 %arg2) {
+ %ret = xor i64 %arg1, %arg2
+ ret i64 %ret
+ }
+
+...
+---
+name: test_xor_i8
+# ALL-LABEL: name: test_xor_i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr8, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr8, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %dil
+# ALL-NEXT: %1 = COPY %sil
+# ALL-NEXT: %2 = XOR8rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %al = COPY %2
+# ALL-NEXT: RET 0, implicit %al
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s8) = COPY %edi
+ %1(s8) = COPY %esi
+ %2(s8) = G_XOR %0, %1
+ %al = COPY %2(s8)
+ RET 0, implicit %al
+
+...
+---
+name: test_xor_i16
+# ALL-LABEL: name: test_xor_i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr16, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr16, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %di
+# ALL-NEXT: %1 = COPY %si
+# ALL-NEXT: %2 = XOR16rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %ax = COPY %2
+# ALL-NEXT: RET 0, implicit %ax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s16) = COPY %edi
+ %1(s16) = COPY %esi
+ %2(s16) = G_XOR %0, %1
+ %ax = COPY %2(s16)
+ RET 0, implicit %ax
+
+...
+---
+name: test_xor_i32
+# ALL-LABEL: name: test_xor_i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr32, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %edi
+# ALL-NEXT: %1 = COPY %esi
+# ALL-NEXT: %2 = XOR32rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %eax = COPY %2
+# ALL-NEXT: RET 0, implicit %eax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %edi, %esi
+
+ %0(s32) = COPY %edi
+ %1(s32) = COPY %esi
+ %2(s32) = G_XOR %0, %1
+ %eax = COPY %2(s32)
+ RET 0, implicit %eax
+
+...
+---
+name: test_xor_i64
+# ALL-LABEL: name: test_xor_i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' }
+# ALL-NEXT: - { id: 2, class: gr64, preferred-register: '' }
+registers:
+ - { id: 0, class: gpr, preferred-register: '' }
+ - { id: 1, class: gpr, preferred-register: '' }
+ - { id: 2, class: gpr, preferred-register: '' }
+liveins:
+fixedStack:
+stack:
+constants:
+# ALL: %0 = COPY %rdi
+# ALL-NEXT: %1 = COPY %rsi
+# ALL-NEXT: %2 = XOR64rr %0, %1, implicit-def %eflags
+# ALL-NEXT: %rax = COPY %2
+# ALL-NEXT: RET 0, implicit %rax
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %rdi, %rsi
+
+ %0(s64) = COPY %rdi
+ %1(s64) = COPY %rsi
+ %2(s64) = G_XOR %0, %1
+ %rax = COPY %2(s64)
+ RET 0, implicit %rax
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/xor-scalar.ll b/test/CodeGen/X86/GlobalISel/xor-scalar.ll
new file mode 100644
index 000000000000..9941db8abd9c
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/xor-scalar.ll
@@ -0,0 +1,43 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
+
+define i8 @test_xor_i8(i8 %arg1, i8 %arg2) {
+; ALL-LABEL: test_xor_i8:
+; ALL: # BB#0:
+; ALL-NEXT: xorb %dil, %sil
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = xor i8 %arg1, %arg2
+ ret i8 %ret
+}
+
+define i16 @test_xor_i16(i16 %arg1, i16 %arg2) {
+; ALL-LABEL: test_xor_i16:
+; ALL: # BB#0:
+; ALL-NEXT: xorw %di, %si
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = xor i16 %arg1, %arg2
+ ret i16 %ret
+}
+
+define i32 @test_xor_i32(i32 %arg1, i32 %arg2) {
+; ALL-LABEL: test_xor_i32:
+; ALL: # BB#0:
+; ALL-NEXT: xorl %edi, %esi
+; ALL-NEXT: movl %esi, %eax
+; ALL-NEXT: retq
+ %ret = xor i32 %arg1, %arg2
+ ret i32 %ret
+}
+
+define i64 @test_xor_i64(i64 %arg1, i64 %arg2) {
+; ALL-LABEL: test_xor_i64:
+; ALL: # BB#0:
+; ALL-NEXT: xorq %rdi, %rsi
+; ALL-NEXT: movq %rsi, %rax
+; ALL-NEXT: retq
+ %ret = xor i64 %arg1, %arg2
+ ret i64 %ret
+}
+
diff --git a/test/CodeGen/X86/atom-call-reg-indirect.ll b/test/CodeGen/X86/atom-call-reg-indirect.ll
index 663b6f1eee51..8045abc7bad6 100644
--- a/test/CodeGen/X86/atom-call-reg-indirect.ll
+++ b/test/CodeGen/X86/atom-call-reg-indirect.ll
@@ -4,6 +4,8 @@
; RUN: llc < %s -mcpu=core2 -mtriple=x86_64-linux | FileCheck -check-prefix=ATOM-NOT64 %s
; RUN: llc < %s -mcpu=slm -mtriple=i686-linux | FileCheck -check-prefix=SLM32 %s
; RUN: llc < %s -mcpu=slm -mtriple=x86_64-linux | FileCheck -check-prefix=SLM64 %s
+; RUN: llc < %s -mcpu=goldmont -mtriple=i686-linux | FileCheck -check-prefix=SLM32 %s
+; RUN: llc < %s -mcpu=goldmont -mtriple=x86_64-linux | FileCheck -check-prefix=SLM64 %s
; fn_ptr.ll
diff --git a/test/CodeGen/X86/atom-fixup-lea2.ll b/test/CodeGen/X86/atom-fixup-lea2.ll
index ec8261388734..68b376ea5cc2 100644
--- a/test/CodeGen/X86/atom-fixup-lea2.ll
+++ b/test/CodeGen/X86/atom-fixup-lea2.ll
@@ -1,4 +1,6 @@
; RUN: llc < %s -mcpu=atom -mtriple=i686-linux | FileCheck %s
+; RUN: llc < %s -mcpu=goldmont -mtriple=i686-linux | FileCheck %s
+
; CHECK:BB#5
; CHECK-NEXT:leal
; CHECK-NEXT:leal
diff --git a/test/CodeGen/X86/atom-sched.ll b/test/CodeGen/X86/atom-sched.ll
index b81359e2832b..bddb015a0dd5 100644
--- a/test/CodeGen/X86/atom-sched.ll
+++ b/test/CodeGen/X86/atom-sched.ll
@@ -1,5 +1,6 @@
; RUN: llc <%s -O2 -mcpu=atom -march=x86 -relocation-model=static | FileCheck -check-prefix=atom %s
; RUN: llc <%s -O2 -mcpu=slm -march=x86 -relocation-model=static | FileCheck -check-prefix=slm %s
+; RUN: llc <%s -O2 -mcpu=goldmont -march=x86 -relocation-model=static | FileCheck -check-prefix=slm %s
; RUN: llc <%s -O2 -mcpu=core2 -march=x86 -relocation-model=static | FileCheck %s
;
diff --git a/test/CodeGen/X86/avx2-arith.ll b/test/CodeGen/X86/avx2-arith.ll
index aec74424b9b2..017f54b40b2d 100644
--- a/test/CodeGen/X86/avx2-arith.ll
+++ b/test/CodeGen/X86/avx2-arith.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpaddq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <4 x i64> %i, %j
@@ -18,12 +18,12 @@ define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpaddd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <8 x i32> %i, %j
@@ -32,12 +32,12 @@ define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpaddw:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddw:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <16 x i16> %i, %j
@@ -46,12 +46,12 @@ define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpaddb:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpaddb:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = add <32 x i8> %i, %j
@@ -60,12 +60,12 @@ define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: test_vpsubq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <4 x i64> %i, %j
@@ -74,12 +74,12 @@ define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpsubd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <8 x i32> %i, %j
@@ -88,12 +88,12 @@ define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpsubw:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubw:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <16 x i16> %i, %j
@@ -102,12 +102,12 @@ define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: test_vpsubb:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpsubb:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = sub <32 x i8> %i, %j
@@ -116,12 +116,12 @@ define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: test_vpmulld:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmulld:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <8 x i32> %i, %j
@@ -130,12 +130,12 @@ define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: test_vpmullw:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpmullw:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%x = mul <16 x i16> %i, %j
@@ -144,7 +144,7 @@ define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v16i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxbw %xmm1, %ymm1
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -157,7 +157,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v16i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbw %xmm1, %ymm1
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
@@ -174,7 +174,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: mul_v32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vextracti128 $1, %ymm1, %xmm2
; X32-NEXT: vpmovsxbw %xmm2, %ymm2
; X32-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -196,7 +196,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vextracti128 $1, %ymm1, %xmm2
; X64-NEXT: vpmovsxbw %xmm2, %ymm2
; X64-NEXT: vextracti128 $1, %ymm0, %xmm3
@@ -222,7 +222,7 @@ define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: mul_v4i64:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlq $32, %ymm0, %ymm2
; X32-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X32-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -234,7 +234,7 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-NEXT: retl
;
; X64-LABEL: mul_v4i64:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlq $32, %ymm0, %ymm2
; X64-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
; X64-NEXT: vpsrlq $32, %ymm1, %ymm3
@@ -250,12 +250,12 @@ define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <8 x i32> @mul_const1(<8 x i32> %x) {
; X32-LABEL: mul_const1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -264,12 +264,12 @@ define <8 x i32> @mul_const1(<8 x i32> %x) {
define <4 x i64> @mul_const2(<4 x i64> %x) {
; X32-LABEL: mul_const2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <4 x i64> %x, <i64 4, i64 4, i64 4, i64 4>
@@ -278,12 +278,12 @@ define <4 x i64> @mul_const2(<4 x i64> %x) {
define <16 x i16> @mul_const3(<16 x i16> %x) {
; X32-LABEL: mul_const3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <16 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -292,13 +292,13 @@ define <16 x i16> @mul_const3(<16 x i16> %x) {
define <4 x i64> @mul_const4(<4 x i64> %x) {
; X32-LABEL: mul_const4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -308,12 +308,12 @@ define <4 x i64> @mul_const4(<4 x i64> %x) {
define <8 x i32> @mul_const5(<8 x i32> %x) {
; X32-LABEL: mul_const5:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vxorps %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const5:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorps %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -322,12 +322,12 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
define <8 x i32> @mul_const6(<8 x i32> %x) {
; X32-LABEL: mul_const6:
-; X32: ## BB#0:
-; X32-NEXT: vpmulld LCPI18_0, %ymm0, %ymm0
+; X32: # BB#0:
+; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const6:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmulld {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
%y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 2, i32 0, i32 2, i32 0, i32 0>
@@ -336,13 +336,13 @@ define <8 x i32> @mul_const6(<8 x i32> %x) {
define <8 x i64> @mul_const7(<8 x i64> %x) {
; X32-LABEL: mul_const7:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X32-NEXT: retl
;
; X64-LABEL: mul_const7:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: vpaddq %ymm1, %ymm1, %ymm1
; X64-NEXT: retq
@@ -352,12 +352,12 @@ define <8 x i64> @mul_const7(<8 x i64> %x) {
define <8 x i16> @mul_const8(<8 x i16> %x) {
; X32-LABEL: mul_const8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $3, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $3, %xmm0, %xmm0
; X64-NEXT: retq
%y = mul <8 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
@@ -366,14 +366,14 @@ define <8 x i16> @mul_const8(<8 x i16> %x) {
define <8 x i32> @mul_const9(<8 x i32> %x) {
; X32-LABEL: mul_const9:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl $2, %eax
; X32-NEXT: vmovd %eax, %xmm1
; X32-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const9:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: movl $2, %eax
; X64-NEXT: vmovd %eax, %xmm1
; X64-NEXT: vpmulld %ymm1, %ymm0, %ymm0
@@ -385,13 +385,13 @@ define <8 x i32> @mul_const9(<8 x i32> %x) {
; %x * 0x01010101
define <4 x i32> @mul_const10(<4 x i32> %x) {
; X32-LABEL: mul_const10:
-; X32: ## BB#0:
-; X32-NEXT: vpbroadcastd LCPI22_0, %xmm1
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const10:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
@@ -402,13 +402,13 @@ define <4 x i32> @mul_const10(<4 x i32> %x) {
; %x * 0x80808080
define <4 x i32> @mul_const11(<4 x i32> %x) {
; X32-LABEL: mul_const11:
-; X32: ## BB#0:
-; X32-NEXT: vpbroadcastd LCPI23_0, %xmm1
+; X32: # BB#0:
+; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm1
; X32-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const11:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/avx2-cmp.ll b/test/CodeGen/X86/avx2-cmp.ll
index e2b550383c8d..2369aa5ac9a0 100644
--- a/test/CodeGen/X86/avx2-cmp.ll
+++ b/test/CodeGen/X86/avx2-cmp.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: v8i32_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v8i32_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <8 x i32> %i, %j
@@ -19,12 +19,12 @@ define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <4 x i64> %i, %j
@@ -34,12 +34,12 @@ define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <16 x i16> %i, %j
@@ -49,12 +49,12 @@ define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpgt:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpgt:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
%bincmp = icmp slt <32 x i8> %i, %j
@@ -64,12 +64,12 @@ define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; X32-LABEL: int256_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: int256_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <8 x i32> %i, %j
@@ -79,12 +79,12 @@ define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; X32-LABEL: v4i64_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v4i64_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <4 x i64> %i, %j
@@ -94,12 +94,12 @@ define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
; X32-LABEL: v16i16_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v16i16_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <16 x i16> %i, %j
@@ -109,12 +109,12 @@ define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone
define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
; X32-LABEL: v32i8_cmpeq:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: v32i8_cmpeq:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%bincmp = icmp eq <32 x i8> %i, %j
diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll
index 26edafbdb64f..60cc2cf199e6 100755
--- a/test/CodeGen/X86/avx2-conversions.ll
+++ b/test/CodeGen/X86/avx2-conversions.ll
@@ -1,21 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32-LABEL: trunc4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: trunc4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <4 x i64> %A to <4 x i32>
@@ -24,18 +24,18 @@ define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32-LABEL: trunc8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: trunc8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <8 x i32> %A to <8 x i16>
@@ -44,12 +44,12 @@ define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
define <4 x i64> @sext4(<4 x i32> %A) nounwind {
; X32-LABEL: sext4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxdq %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <4 x i32> %A to <4 x i64>
@@ -58,12 +58,12 @@ define <4 x i64> @sext4(<4 x i32> %A) nounwind {
define <8 x i32> @sext8(<8 x i16> %A) nounwind {
; X32-LABEL: sext8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: retq
%B = sext <8 x i16> %A to <8 x i32>
@@ -72,12 +72,12 @@ define <8 x i32> @sext8(<8 x i16> %A) nounwind {
define <4 x i64> @zext4(<4 x i32> %A) nounwind {
; X32-LABEL: zext4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: retl
;
; X64-LABEL: zext4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: retq
%B = zext <4 x i32> %A to <4 x i64>
@@ -86,12 +86,12 @@ define <4 x i64> @zext4(<4 x i32> %A) nounwind {
define <8 x i32> @zext8(<8 x i16> %A) nounwind {
; X32-LABEL: zext8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
%B = zext <8 x i16> %A to <8 x i32>
@@ -100,13 +100,13 @@ define <8 x i32> @zext8(<8 x i16> %A) nounwind {
define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
; X32-LABEL: zext_8i8_8i32:
-; X32: ## BB#0:
-; X32-NEXT: vpand LCPI6_0, %xmm0, %xmm0
+; X32: # BB#0:
+; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_8i8_8i32:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: retq
@@ -116,12 +116,12 @@ define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind {
define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: zext_16i8_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: retl
;
; X64-LABEL: zext_16i8_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT: retq
%t = zext <16 x i8> %z to <16 x i16>
@@ -130,12 +130,12 @@ define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) {
define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
; X32-LABEL: sext_16i8_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovsxbw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_16i8_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbw %xmm0, %ymm0
; X64-NEXT: retq
%t = sext <16 x i8> %z to <16 x i16>
@@ -144,7 +144,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X32-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -154,7 +154,7 @@ define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-NEXT: retl
;
; X64-LABEL: trunc_16i16_16i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
; X64-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; X64-NEXT: vpshufb %xmm2, %xmm1, %xmm1
@@ -168,13 +168,13 @@ define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
; X32-LABEL: load_sext_test1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxdq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxdq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i32>, <4 x i32>* %ptr
@@ -184,13 +184,13 @@ define <4 x i64> @load_sext_test1(<4 x i32> *%ptr) {
define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
; X32-LABEL: load_sext_test2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i8>, <4 x i8>* %ptr
@@ -200,13 +200,13 @@ define <4 x i64> @load_sext_test2(<4 x i8> *%ptr) {
define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
; X32-LABEL: load_sext_test3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwq (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxwq (%rdi), %ymm0
; X64-NEXT: retq
%X = load <4 x i16>, <4 x i16>* %ptr
@@ -216,13 +216,13 @@ define <4 x i64> @load_sext_test3(<4 x i16> *%ptr) {
define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
; X32-LABEL: load_sext_test4:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxwd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test4:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxwd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i16>, <8 x i16>* %ptr
@@ -232,13 +232,13 @@ define <8 x i32> @load_sext_test4(<8 x i16> *%ptr) {
define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) {
; X32-LABEL: load_sext_test5:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpmovsxbd (%eax), %ymm0
; X32-NEXT: retl
;
; X64-LABEL: load_sext_test5:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovsxbd (%rdi), %ymm0
; X64-NEXT: retq
%X = load <8 x i8>, <8 x i8>* %ptr
diff --git a/test/CodeGen/X86/avx2-fma-fneg-combine.ll b/test/CodeGen/X86/avx2-fma-fneg-combine.ll
index 345943bd7303..019593cc0f80 100644
--- a/test/CodeGen/X86/avx2-fma-fneg-combine.ll
+++ b/test/CodeGen/X86/avx2-fma-fneg-combine.ll
@@ -1,17 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefix=X64
; This test checks combinations of FNEG and FMA intrinsics
define <8 x float> @test1(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -24,12 +24,12 @@ declare <8 x float> @llvm.x86.fma.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x f
define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
@@ -42,14 +42,14 @@ declare <4 x float> @llvm.x86.fma.vfmadd.ps(<4 x float> %a, <4 x float> %b, <4 x
define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; X32-LABEL: test3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
-; X32-NEXT: vbroadcastss LCPI2_0, %xmm1
+; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %xmm1
; X32-NEXT: vxorps %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmadd213ss %xmm2, %xmm1, %xmm0
; X64-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; X64-NEXT: vxorps %xmm1, %xmm0, %xmm0
@@ -64,12 +64,12 @@ declare <4 x float> @llvm.x86.fma.vfnmadd.ss(<4 x float> %a, <4 x float> %b, <4
define <8 x float> @test4(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test4:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test4:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0
; X64-NEXT: retq
entry:
@@ -80,14 +80,14 @@ entry:
define <8 x float> @test5(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
; X32-LABEL: test5:
-; X32: ## BB#0: ## %entry
-; X32-NEXT: vbroadcastss LCPI4_0, %ymm3
+; X32: # BB#0: # %entry
+; X32-NEXT: vbroadcastss {{\.LCPI.*}}, %ymm3
; X32-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X32-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test5:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vbroadcastss {{.*}}(%rip), %ymm3
; X64-NEXT: vxorps %ymm3, %ymm2, %ymm2
; X64-NEXT: vfmsub213ps %ymm2, %ymm1, %ymm0
@@ -103,12 +103,12 @@ declare <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x f
define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
; X32-LABEL: test6:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: test6:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx2-gather.ll b/test/CodeGen/X86/avx2-gather.ll
index d162b4755ee1..64dd6fa00616 100644
--- a/test/CodeGen/X86/avx2-gather.ll
+++ b/test/CodeGen/X86/avx2-gather.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*,
<4 x i32>, <4 x float>, i8) nounwind readonly
define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
@@ -15,7 +15,7 @@ define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x floa
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdps %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
@@ -30,7 +30,7 @@ declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X32-NEXT: vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
@@ -38,7 +38,7 @@ define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x dou
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT: vgatherdpd %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT: vmovapd %xmm2, %xmm0
@@ -53,7 +53,7 @@ declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps_256:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorps %ymm2, %ymm2, %ymm2
; X32-NEXT: vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
@@ -61,7 +61,7 @@ define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorps %ymm2, %ymm2, %ymm2
; X64-NEXT: vgatherdps %ymm1, (%rdi,%ymm0,4), %ymm2
; X64-NEXT: vmovaps %ymm2, %ymm0
@@ -76,7 +76,7 @@ declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd_256:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vxorpd %ymm2, %ymm2, %ymm2
; X32-NEXT: vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
@@ -84,7 +84,7 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
; X32-NEXT: retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vxorpd %ymm2, %ymm2, %ymm2
; X64-NEXT: vgatherdpd %ymm1, (%rdi,%xmm0,8), %ymm2
; X64-NEXT: vmovapd %ymm2, %ymm0
@@ -96,7 +96,7 @@ define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x
define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_epi32:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
@@ -105,7 +105,7 @@ define <2 x i64> @test_mm_i32gather_epi32(i32 *%a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_epi32:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
@@ -122,7 +122,7 @@ declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>
define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-LABEL: test_mm_i32gather_pd:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
@@ -131,7 +131,7 @@ define <2 x double> @test_mm_i32gather_pd(double *%a0, <2 x i64> %a1) {
; X32-NEXT: retl
;
; X64-LABEL: test_mm_i32gather_pd:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT: vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
diff --git a/test/CodeGen/X86/avx2-logic.ll b/test/CodeGen/X86/avx2-logic.ll
index 9208d959a755..68d486699cbc 100644
--- a/test/CodeGen/X86/avx2-logic.ll
+++ b/test/CodeGen/X86/avx2-logic.ll
@@ -1,17 +1,17 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpandn:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X32-NEXT: vpandn %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpandn:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm1
; X64-NEXT: vpandn %ymm0, %ymm1, %ymm0
@@ -26,14 +26,14 @@ entry:
define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpand:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpand %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpand:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpand %ymm1, %ymm0, %ymm0
@@ -47,14 +47,14 @@ entry:
define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpor:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpor:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpor %ymm1, %ymm0, %ymm0
@@ -68,14 +68,14 @@ entry:
define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; X32-LABEL: vpxor:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpxor:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2
; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; X64-NEXT: vpxor %ymm1, %ymm0, %ymm0
@@ -89,14 +89,14 @@ entry:
define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
; X32-LABEL: vpblendvb:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $7, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI4_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vpblendvb:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $7, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
@@ -107,12 +107,12 @@ define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
define <8 x i32> @allOnes() nounwind {
; X32-LABEL: allOnes:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
@@ -120,12 +120,12 @@ define <8 x i32> @allOnes() nounwind {
define <16 x i16> @allOnes2() nounwind {
; X32-LABEL: allOnes2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: allOnes2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
diff --git a/test/CodeGen/X86/avx2-phaddsub.ll b/test/CodeGen/X86/avx2-phaddsub.ll
index 9eafac902b86..232a3326fa13 100644
--- a/test/CodeGen/X86/avx2-phaddsub.ll
+++ b/test/CodeGen/X86/avx2-phaddsub.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -20,12 +20,12 @@ define <16 x i16> @phaddw1(<16 x i16> %x, <16 x i16> %y) {
define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phaddw2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddw2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 17, i32 19, i32 21, i32 23, i32 9, i32 11, i32 13, i32 15, i32 25, i32 27, i32 29, i32 31>
@@ -36,12 +36,12 @@ define <16 x i16> @phaddw2(<16 x i16> %x, <16 x i16> %y) {
define <8 x i32> @phaddd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -52,12 +52,12 @@ define <8 x i32> @phaddd1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phaddd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phaddd2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 5, i32 6, i32 13, i32 14>
@@ -68,12 +68,12 @@ define <8 x i32> @phaddd2(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phaddd3(<8 x i32> %x) {
; X32-LABEL: phaddd3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phaddd3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 undef, i32 2, i32 8, i32 10, i32 4, i32 6, i32 undef, i32 14>
@@ -84,12 +84,12 @@ define <8 x i32> @phaddd3(<8 x i32> %x) {
define <16 x i16> @phsubw1(<16 x i16> %x, <16 x i16> %y) {
; X32-LABEL: phsubw1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubw1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphsubw %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <16 x i16> %x, <16 x i16> %y, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 16, i32 18, i32 20, i32 22, i32 8, i32 10, i32 12, i32 14, i32 24, i32 26, i32 28, i32 30>
@@ -100,12 +100,12 @@ define <16 x i16> @phsubw1(<16 x i16> %x, <16 x i16> %y) {
define <8 x i32> @phsubd1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
@@ -116,12 +116,12 @@ define <8 x i32> @phsubd1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @phsubd2(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: phsubd2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: phsubd2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%a = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 undef, i32 8, i32 undef, i32 4, i32 6, i32 12, i32 14>
diff --git a/test/CodeGen/X86/avx2-shift.ll b/test/CodeGen/X86/avx2-shift.ll
index 4345bd6f7926..47bbba2c7e08 100644
--- a/test/CodeGen/X86/avx2-shift.ll
+++ b/test/CodeGen/X86/avx2-shift.ll
@@ -1,15 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_shl0:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <4 x i32> %x, %y
@@ -18,12 +18,12 @@ define <4 x i32> @variable_shl0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_shl1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <8 x i32> %x, %y
@@ -32,12 +32,12 @@ define <8 x i32> @variable_shl1(<8 x i32> %x, <8 x i32> %y) {
define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_shl2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = shl <2 x i64> %x, %y
@@ -46,12 +46,12 @@ define <2 x i64> @variable_shl2(<2 x i64> %x, <2 x i64> %y) {
define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_shl3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = shl <4 x i64> %x, %y
@@ -60,12 +60,12 @@ define <4 x i64> @variable_shl3(<4 x i64> %x, <4 x i64> %y) {
define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_srl0:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <4 x i32> %x, %y
@@ -74,12 +74,12 @@ define <4 x i32> @variable_srl0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_srl1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <8 x i32> %x, %y
@@ -88,12 +88,12 @@ define <8 x i32> @variable_srl1(<8 x i32> %x, <8 x i32> %y) {
define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
; X32-LABEL: variable_srl2:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = lshr <2 x i64> %x, %y
@@ -102,12 +102,12 @@ define <2 x i64> @variable_srl2(<2 x i64> %x, <2 x i64> %y) {
define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
; X32-LABEL: variable_srl3:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = lshr <4 x i64> %x, %y
@@ -116,12 +116,12 @@ define <4 x i64> @variable_srl3(<4 x i64> %x, <4 x i64> %y) {
define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
; X32-LABEL: variable_sra0:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%k = ashr <4 x i32> %x, %y
@@ -130,12 +130,12 @@ define <4 x i32> @variable_sra0(<4 x i32> %x, <4 x i32> %y) {
define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
; X32-LABEL: variable_sra1:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: retq
%k = ashr <8 x i32> %x, %y
@@ -146,12 +146,12 @@ define <8 x i32> @variable_sra1(<8 x i32> %x, <8 x i32> %y) {
define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift00:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpslld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift00:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpslld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -160,12 +160,12 @@ define <8 x i32> @vshift00(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift01:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift01:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -174,12 +174,12 @@ define <16 x i16> @vshift01(<16 x i16> %a) nounwind readnone {
define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift02:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift02:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = shl <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -190,12 +190,12 @@ define <4 x i64> @vshift02(<4 x i64> %a) nounwind readnone {
define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift03:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrld $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift03:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrld $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -204,12 +204,12 @@ define <8 x i32> @vshift03(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift04:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift04:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -218,12 +218,12 @@ define <16 x i16> @vshift04(<16 x i16> %a) nounwind readnone {
define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
; X32-LABEL: vshift05:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlq $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift05:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlq $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = lshr <4 x i64> %a, <i64 2, i64 2, i64 2, i64 2>
@@ -234,12 +234,12 @@ define <4 x i64> @vshift05(<4 x i64> %a) nounwind readnone {
define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
; X32-LABEL: vshift06:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrad $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift06:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrad $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
@@ -248,12 +248,12 @@ define <8 x i32> @vshift06(<8 x i32> %a) nounwind readnone {
define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
; X32-LABEL: vshift07:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsraw $2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: vshift07:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsraw $2, %ymm0, %ymm0
; X64-NEXT: retq
%s = ashr <16 x i16> %a, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
@@ -262,13 +262,13 @@ define <16 x i16> @vshift07(<16 x i16> %a) nounwind readnone {
define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_sra0_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra0_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -278,13 +278,13 @@ define <4 x i32> @variable_sra0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_sra1_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsravd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_sra1_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsravd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -294,13 +294,13 @@ define <8 x i32> @variable_sra1_load(<8 x i32> %x, <8 x i32>* %y) {
define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_shl0_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl0_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -310,13 +310,13 @@ define <4 x i32> @variable_shl0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_shl1_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl1_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -326,13 +326,13 @@ define <8 x i32> @variable_shl1_load(<8 x i32> %x, <8 x i32>* %y) {
define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_shl2_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl2_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -342,13 +342,13 @@ define <2 x i64> @variable_shl2_load(<2 x i64> %x, <2 x i64>* %y) {
define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_shl3_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsllvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_shl3_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -358,13 +358,13 @@ define <4 x i64> @variable_shl3_load(<4 x i64> %x, <4 x i64>* %y) {
define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
; X32-LABEL: variable_srl0_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl0_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <4 x i32>, <4 x i32>* %y
@@ -374,13 +374,13 @@ define <4 x i32> @variable_srl0_load(<4 x i32> %x, <4 x i32>* %y) {
define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
; X32-LABEL: variable_srl1_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvd (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl1_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvd (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <8 x i32>, <8 x i32>* %y
@@ -390,13 +390,13 @@ define <8 x i32> @variable_srl1_load(<8 x i32> %x, <8 x i32>* %y) {
define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
; X32-LABEL: variable_srl2_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %xmm0, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl2_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
%y1 = load <2 x i64>, <2 x i64>* %y
@@ -406,13 +406,13 @@ define <2 x i64> @variable_srl2_load(<2 x i64> %x, <2 x i64>* %y) {
define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
; X32-LABEL: variable_srl3_load:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vpsrlvq (%eax), %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: variable_srl3_load:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlvq (%rdi), %ymm0, %ymm0
; X64-NEXT: retq
%y1 = load <4 x i64>, <4 x i64>* %y
@@ -422,13 +422,13 @@ define <4 x i64> @variable_srl3_load(<4 x i64> %x, <4 x i64>* %y) {
define <32 x i8> @shl9(<32 x i8> %A) nounwind {
; X32-LABEL: shl9:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI28_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shl9:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -438,13 +438,13 @@ define <32 x i8> @shl9(<32 x i8> %A) nounwind {
define <32 x i8> @shr9(<32 x i8> %A) nounwind {
; X32-LABEL: shr9:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI29_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shr9:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -454,13 +454,13 @@ define <32 x i8> @shr9(<32 x i8> %A) nounwind {
define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8_7:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X32-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8_7:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm1, %ymm1, %ymm1
; X64-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -470,16 +470,16 @@ define <32 x i8> @sra_v32i8_7(<32 x i8> %A) nounwind {
define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT: vpand LCPI31_0, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sra_v32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsrlw $3, %ymm0, %ymm0
; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
@@ -492,13 +492,13 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
; X32-LABEL: sext_v16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $8, %ymm0, %ymm0
; X32-NEXT: vpsraw $8, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $8, %ymm0, %ymm0
; X64-NEXT: vpsraw $8, %ymm0, %ymm0
; X64-NEXT: retq
@@ -509,13 +509,13 @@ define <16 x i16> @sext_v16i16(<16 x i16> %a) nounwind {
define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
; X32-LABEL: sext_v8i32:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpslld $16, %ymm0, %ymm0
; X32-NEXT: vpsrad $16, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: sext_v8i32:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpslld $16, %ymm0, %ymm0
; X64-NEXT: vpsrad $16, %ymm0, %ymm0
; X64-NEXT: retq
@@ -526,24 +526,24 @@ define <8 x i32> @sext_v8i32(<8 x i32> %a) nounwind {
define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_shl16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: variable_shl16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shl <8 x i16> %lhs, %rhs
@@ -552,24 +552,24 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_ashr16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: variable_ashr16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = ashr <8 x i16> %lhs, %rhs
@@ -578,24 +578,24 @@ define <8 x i16> @variable_ashr16(<8 x i16> %lhs, <8 x i16> %rhs) {
define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-LABEL: variable_lshr16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: variable_lshr16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = lshr <8 x i16> %lhs, %rhs
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index 45a1cd975038..127726ea30da 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
; AVX2 Logical Shift Left
define <16 x i16> @test_sllw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -19,12 +19,12 @@ entry:
define <16 x i16> @test_sllw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpaddw %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -34,12 +34,12 @@ entry:
define <16 x i16> @test_sllw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sllw_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsllw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllw_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsllw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -49,11 +49,11 @@ entry:
define <8 x i32> @test_slld_1(<8 x i32> %InVec) {
; X32-LABEL: test_slld_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_slld_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -62,12 +62,12 @@ entry:
define <8 x i32> @test_slld_2(<8 x i32> %InVec) {
; X32-LABEL: test_slld_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpaddd %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -77,14 +77,14 @@ entry:
define <8 x i32> @test_vpslld_var(i32 %shift) {
; X32-LABEL: test_vpslld_var:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X32-NEXT: vpslld %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_vpslld_var:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vmovd %edi, %xmm0
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [192,193,194,195,196,197,198,199]
; X64-NEXT: vpslld %xmm0, %ymm1, %ymm0
@@ -96,12 +96,12 @@ define <8 x i32> @test_vpslld_var(i32 %shift) {
define <8 x i32> @test_slld_3(<8 x i32> %InVec) {
; X32-LABEL: test_slld_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpslld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_slld_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpslld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -111,11 +111,11 @@ entry:
define <4 x i64> @test_sllq_1(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = shl <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -124,12 +124,12 @@ entry:
define <4 x i64> @test_sllq_2(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpaddq %ymm0, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -139,12 +139,12 @@ entry:
define <4 x i64> @test_sllq_3(<4 x i64> %InVec) {
; X32-LABEL: test_sllq_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsllq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sllq_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsllq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -156,11 +156,11 @@ entry:
define <16 x i16> @test_sraw_1(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -169,12 +169,12 @@ entry:
define <16 x i16> @test_sraw_2(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsraw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsraw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -184,12 +184,12 @@ entry:
define <16 x i16> @test_sraw_3(<16 x i16> %InVec) {
; X32-LABEL: test_sraw_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsraw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_sraw_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsraw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -199,11 +199,11 @@ entry:
define <8 x i32> @test_srad_1(<8 x i32> %InVec) {
; X32-LABEL: test_srad_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srad_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = ashr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -212,12 +212,12 @@ entry:
define <8 x i32> @test_srad_2(<8 x i32> %InVec) {
; X32-LABEL: test_srad_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrad $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrad $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -227,12 +227,12 @@ entry:
define <8 x i32> @test_srad_3(<8 x i32> %InVec) {
; X32-LABEL: test_srad_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrad $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srad_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrad $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -244,11 +244,11 @@ entry:
define <16 x i16> @test_srlw_1(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <16 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
@@ -257,12 +257,12 @@ entry:
define <16 x i16> @test_srlw_2(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlw $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlw $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -272,12 +272,12 @@ entry:
define <16 x i16> @test_srlw_3(<16 x i16> %InVec) {
; X32-LABEL: test_srlw_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlw $15, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlw_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlw $15, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -287,11 +287,11 @@ entry:
define <8 x i32> @test_srld_1(<8 x i32> %InVec) {
; X32-LABEL: test_srld_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srld_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
@@ -300,12 +300,12 @@ entry:
define <8 x i32> @test_srld_2(<8 x i32> %InVec) {
; X32-LABEL: test_srld_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrld $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrld $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -315,12 +315,12 @@ entry:
define <8 x i32> @test_srld_3(<8 x i32> %InVec) {
; X32-LABEL: test_srld_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrld $31, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srld_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrld $31, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -330,11 +330,11 @@ entry:
define <4 x i64> @test_srlq_1(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_1:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_1:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: retq
entry:
%shl = lshr <4 x i64> %InVec, <i64 0, i64 0, i64 0, i64 0>
@@ -343,12 +343,12 @@ entry:
define <4 x i64> @test_srlq_2(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_2:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlq $1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_2:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlq $1, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -358,12 +358,12 @@ entry:
define <4 x i64> @test_srlq_3(<4 x i64> %InVec) {
; X32-LABEL: test_srlq_3:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpsrlq $63, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_srlq_3:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpsrlq $63, %ymm0, %ymm0
; X64-NEXT: retq
entry:
@@ -373,17 +373,17 @@ entry:
define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
; X32-LABEL: srl_trunc_and_v4i64:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; X32-NEXT: vpbroadcastd LCPI25_0, %xmm2
+; X32-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm2
; X32-NEXT: vpand %xmm2, %xmm1, %xmm1
; X32-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: srl_trunc_and_v4i64:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; X64-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
@@ -403,24 +403,24 @@ define <4 x i32> @srl_trunc_and_v4i64(<4 x i32> %x, <4 x i64> %y) nounwind {
define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: shl_8i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: shl_8i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%shl = shl <8 x i16> %r, %a
@@ -429,7 +429,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: shl_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -443,7 +443,7 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -461,13 +461,13 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: shl_32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsllw $4, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI28_0, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsllw $2, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI28_1, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpaddb %ymm0, %ymm0, %ymm2
@@ -476,7 +476,7 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: shl_32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsllw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
@@ -495,24 +495,24 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: ashr_8i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovsxwd %xmm0, %ymm0
; X32-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: ashr_8i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovsxwd %xmm0, %ymm0
; X64-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%ashr = ashr <8 x i16> %r, %a
@@ -521,7 +521,7 @@ define <8 x i16> @ashr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: ashr_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -535,7 +535,7 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ashr_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -553,7 +553,7 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: ashr_32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X32-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -581,7 +581,7 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: ashr_32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; X64-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
@@ -613,24 +613,24 @@ define <32 x i8> @ashr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-LABEL: lshr_8i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: lshr_8i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%lshr = lshr <8 x i16> %r, %a
@@ -639,7 +639,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-LABEL: lshr_16i16:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X32-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X32-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -653,7 +653,7 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
; X32-NEXT: retl
;
; X64-LABEL: lshr_16i16:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpxor %ymm2, %ymm2, %ymm2
; X64-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
; X64-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
@@ -671,23 +671,23 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32-LABEL: lshr_32i8:
-; X32: ## BB#0:
+; X32: # BB#0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsrlw $4, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI34_0, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $2, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI34_1, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $1, %ymm0, %ymm2
-; X32-NEXT: vpand LCPI34_2, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: lshr_32i8:
-; X64: ## BB#0:
+; X64: # BB#0:
; X64-NEXT: vpsllw $5, %ymm1, %ymm1
; X64-NEXT: vpsrlw $4, %ymm0, %ymm2
; X64-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
diff --git a/test/CodeGen/X86/avx2-vperm.ll b/test/CodeGen/X86/avx2-vperm.ll
index d0e18550f6a8..d57daafab243 100755
--- a/test/CodeGen/X86/avx2-vperm.ll
+++ b/test/CodeGen/X86/avx2-vperm.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
define <8 x i32> @perm_cl_int_8x32(<8 x i32> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_8x32:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X32-NEXT: vpermd %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_8x32:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [0,7,2,1,2,7,6,0]
; X64-NEXT: vpermd %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -22,13 +22,13 @@ entry:
define <8 x float> @perm_cl_fp_8x32(<8 x float> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_8x32:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_8x32:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vmovaps {{.*#+}} ymm1 = <u,7,2,u,4,u,1,6>
; X64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; X64-NEXT: retq
@@ -39,12 +39,12 @@ entry:
define <4 x i64> @perm_cl_int_4x64(<4 x i64> %A) nounwind readnone {
; X32-LABEL: perm_cl_int_4x64:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_int_4x64:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
@@ -54,12 +54,12 @@ entry:
define <4 x double> @perm_cl_fp_4x64(<4 x double> %A) nounwind readnone {
; X32-LABEL: perm_cl_fp_4x64:
-; X32: ## BB#0: ## %entry
+; X32: # BB#0: # %entry
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X32-NEXT: retl
;
; X64-LABEL: perm_cl_fp_4x64:
-; X64: ## BB#0: ## %entry
+; X64: # BB#0: # %entry
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,1]
; X64-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index d96b5882556d..7c0f145bb717 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -1,13 +1,13 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck --check-prefix=CHECK --check-prefix=AVX512F %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512vl | FileCheck --check-prefix=CHECK --check-prefix=AVX512VL %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512bw | FileCheck --check-prefix=CHECK --check-prefix=AVX512BW %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512dq | FileCheck --check-prefix=CHECK --check-prefix=AVX512DQ %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512dq -mattr=+avx512bw -mattr=+avx512vl| FileCheck --check-prefix=CHECK --check-prefix=SKX %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq,+avx512bw,+avx512vl | FileCheck %s --check-prefix=CHECK --check-prefix=SKX
define <8 x double> @addpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: addpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -17,7 +17,7 @@ entry:
define <8 x double> @addpd512fold(<8 x double> %y) {
; CHECK-LABEL: addpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -27,7 +27,7 @@ entry:
define <16 x float> @addps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: addps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -37,7 +37,7 @@ entry:
define <16 x float> @addps512fold(<16 x float> %y) {
; CHECK-LABEL: addps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vaddps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -47,7 +47,7 @@ entry:
define <8 x double> @subpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: subpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -57,7 +57,7 @@ entry:
define <8 x double> @subpd512fold(<8 x double> %y, <8 x double>* %x) {
; CHECK-LABEL: subpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubpd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -68,7 +68,7 @@ entry:
define <16 x float> @subps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: subps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -78,7 +78,7 @@ entry:
define <16 x float> @subps512fold(<16 x float> %y, <16 x float>* %x) {
; CHECK-LABEL: subps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsubps (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -89,7 +89,7 @@ entry:
define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512F-LABEL: imulq512:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512F-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512F-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -101,7 +101,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq512:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512VL-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512VL-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -113,7 +113,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq512:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsrlq $32, %zmm1, %zmm2
; AVX512BW-NEXT: vpmuludq %zmm0, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlq $32, %zmm0, %zmm3
@@ -125,12 +125,12 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq512:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq512:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpmullq %zmm0, %zmm1, %zmm0
; SKX-NEXT: retq
%z = mul <8 x i64>%x, %y
@@ -139,7 +139,7 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512F-LABEL: imulq256:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512F-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -151,7 +151,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq256:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512VL-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -163,7 +163,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq256:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsrlq $32, %ymm1, %ymm2
; AVX512BW-NEXT: vpmuludq %ymm0, %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlq $32, %ymm0, %ymm3
@@ -175,15 +175,15 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq256:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpmullq %ymm0, %ymm1, %ymm0
; SKX-NEXT: retq
%z = mul <4 x i64>%x, %y
@@ -192,7 +192,7 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512F-LABEL: imulq128:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512F-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -204,7 +204,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: imulq128:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512VL-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -216,7 +216,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: imulq128:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm2
; AVX512BW-NEXT: vpmuludq %xmm0, %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm3
@@ -228,16 +228,16 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: imulq128:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq128:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpmullq %xmm0, %xmm1, %xmm0
; SKX-NEXT: retq
%z = mul <2 x i64>%x, %y
@@ -246,7 +246,7 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: mulpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -256,7 +256,7 @@ entry:
define <8 x double> @mulpd512fold(<8 x double> %y) {
; CHECK-LABEL: mulpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -266,7 +266,7 @@ entry:
define <16 x float> @mulps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: mulps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -276,7 +276,7 @@ entry:
define <16 x float> @mulps512fold(<16 x float> %y) {
; CHECK-LABEL: mulps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -286,7 +286,7 @@ entry:
define <8 x double> @divpd512(<8 x double> %y, <8 x double> %x) {
; CHECK-LABEL: divpd512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -296,7 +296,7 @@ entry:
define <8 x double> @divpd512fold(<8 x double> %y) {
; CHECK-LABEL: divpd512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivpd {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -306,7 +306,7 @@ entry:
define <16 x float> @divps512(<16 x float> %y, <16 x float> %x) {
; CHECK-LABEL: divps512:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
entry:
@@ -316,7 +316,7 @@ entry:
define <16 x float> @divps512fold(<16 x float> %y) {
; CHECK-LABEL: divps512fold:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vdivps {{.*}}(%rip), %zmm0, %zmm0
; CHECK-NEXT: retq
entry:
@@ -326,7 +326,7 @@ entry:
define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, %j
@@ -335,7 +335,7 @@ define <8 x i64> @vpaddq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <8 x i64>, <8 x i64>* %j, align 4
@@ -345,7 +345,7 @@ define <8 x i64> @vpaddq_fold_test(<8 x i64> %i, <8 x i64>* %j) nounwind {
define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
; CHECK-LABEL: vpaddq_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -354,7 +354,7 @@ define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
; CHECK-LABEL: vpaddq_broadcast2_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load i64, i64* %j
@@ -372,7 +372,7 @@ define <8 x i64> @vpaddq_broadcast2_test(<8 x i64> %i, i64* %j) nounwind {
define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, %j
@@ -381,7 +381,7 @@ define <16 x i32> @vpaddd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load <16 x i32>, <16 x i32>* %j, align 4
@@ -391,7 +391,7 @@ define <16 x i32> @vpaddd_fold_test(<16 x i32> %i, <16 x i32>* %j) nounwind {
define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
@@ -400,7 +400,7 @@ define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1}
@@ -413,7 +413,7 @@ define <16 x i32> @vpaddd_mask_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %ma
define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -426,7 +426,7 @@ define <16 x i32> @vpaddd_maskz_test(<16 x i32> %i, <16 x i32> %j, <16 x i32> %m
define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1}
@@ -440,7 +440,7 @@ define <16 x i32> @vpaddd_mask_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_mask_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
@@ -453,7 +453,7 @@ define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_fold_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -467,7 +467,7 @@ define <16 x i32> @vpaddd_maskz_fold_test(<16 x i32> %i, <16 x i32>* %j.ptr, <16
define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd_maskz_broadcast_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
@@ -480,7 +480,7 @@ define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <8 x i64> %i, %j
@@ -489,7 +489,7 @@ define <8 x i64> @vpsubq_test(<8 x i64> %i, <8 x i64> %j) nounwind readnone {
define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = sub <16 x i32> %i, %j
@@ -498,7 +498,7 @@ define <16 x i32> @vpsubd_test(<16 x i32> %i, <16 x i32> %j) nounwind readnone {
define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
; CHECK-LABEL: vpmulld_test:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%x = mul <16 x i32> %i, %j
@@ -508,7 +508,7 @@ define <16 x i32> @vpmulld_test(<16 x i32> %i, <16 x i32> %j) {
declare float @sqrtf(float) readnone
define float @sqrtA(float %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtA:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -519,7 +519,7 @@ entry:
declare double @sqrt(double) readnone
define double @sqrtB(double %a) nounwind uwtable readnone ssp {
; CHECK-LABEL: sqrtB:
-; CHECK: ## BB#0: ## %entry
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
entry:
@@ -530,7 +530,7 @@ entry:
declare float @llvm.sqrt.f32(float)
define float @sqrtC(float %a) nounwind {
; CHECK-LABEL: sqrtC:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retq
%b = call float @llvm.sqrt.f32(float %a)
@@ -540,7 +540,7 @@ define float @sqrtC(float %a) nounwind {
declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
define <16 x float> @sqrtD(<16 x float> %a) nounwind {
; CHECK-LABEL: sqrtD:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vsqrtps %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <16 x float> @llvm.sqrt.v16f32(<16 x float> %a)
@@ -550,7 +550,7 @@ define <16 x float> @sqrtD(<16 x float> %a) nounwind {
declare <8 x double> @llvm.sqrt.v8f64(<8 x double>)
define <8 x double> @sqrtE(<8 x double> %a) nounwind {
; CHECK-LABEL: sqrtE:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vsqrtpd %zmm0, %zmm0
; CHECK-NEXT: retq
%b = call <8 x double> @llvm.sqrt.v8f64(<8 x double> %a)
@@ -559,7 +559,7 @@ define <8 x double> @sqrtE(<8 x double> %a) nounwind {
define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
; CHECK-LABEL: fadd_broadcast:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = fadd <16 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
@@ -568,7 +568,7 @@ define <16 x float> @fadd_broadcast(<16 x float> %a) nounwind {
define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
; CHECK-LABEL: addq_broadcast:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%b = add <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -577,27 +577,27 @@ define <8 x i64> @addq_broadcast(<8 x i64> %a) nounwind {
define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
; AVX512F-LABEL: orq_broadcast:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: orq_broadcast:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: orq_broadcast:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: orq_broadcast:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: orq_broadcast:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vorpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
%b = or <8 x i64> %a, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
@@ -606,27 +606,27 @@ define <8 x i64> @orq_broadcast(<8 x i64> %a) nounwind {
define <16 x i32> @andd512fold(<16 x i32> %y, <16 x i32>* %x) {
; AVX512F-LABEL: andd512fold:
-; AVX512F: ## BB#0: ## %entry
+; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpandd (%rdi), %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andd512fold:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: # BB#0: # %entry
; AVX512VL-NEXT: vpandd (%rdi), %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andd512fold:
-; AVX512BW: ## BB#0: ## %entry
+; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpandd (%rdi), %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andd512fold:
-; AVX512DQ: ## BB#0: ## %entry
+; AVX512DQ: # BB#0: # %entry
; AVX512DQ-NEXT: vandps (%rdi), %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andd512fold:
-; SKX: ## BB#0: ## %entry
+; SKX: # BB#0: # %entry
; SKX-NEXT: vandps (%rdi), %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -637,27 +637,27 @@ entry:
define <8 x i64> @andqbrst(<8 x i64> %p1, i64* %ap) {
; AVX512F-LABEL: andqbrst:
-; AVX512F: ## BB#0: ## %entry
+; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: andqbrst:
-; AVX512VL: ## BB#0: ## %entry
+; AVX512VL: # BB#0: # %entry
; AVX512VL-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: andqbrst:
-; AVX512BW: ## BB#0: ## %entry
+; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpandq (%rdi){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: andqbrst:
-; AVX512DQ: ## BB#0: ## %entry
+; AVX512DQ: # BB#0: # %entry
; AVX512DQ-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: andqbrst:
-; SKX: ## BB#0: ## %entry
+; SKX: # BB#0: # %entry
; SKX-NEXT: vandpd (%rdi){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
entry:
@@ -670,7 +670,7 @@ entry:
define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vaddps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm0 {%k1}
@@ -685,7 +685,7 @@ define <16 x float> @test_mask_vaddps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmulps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1}
@@ -700,7 +700,7 @@ define <16 x float> @test_mask_vmulps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vminps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vminps %zmm2, %zmm1, %zmm0 {%k1}
@@ -716,38 +716,38 @@ define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
-; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512F-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vminpd:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vminpd:
-; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512BW-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vminpd:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vminpd:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpxor %ymm4, %ymm4, %ymm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -763,7 +763,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vmaxps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vmaxps %zmm2, %zmm1, %zmm0 {%k1}
@@ -779,38 +779,38 @@ define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
-; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512F: # BB#0:
+; AVX512F-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512F-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_mask_vmaxpd:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512VL-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; AVX512VL-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_mask_vmaxpd:
-; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512BW-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
-; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_mask_vmaxpd:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vpxor %ymm4, %ymm4, %ymm4
; SKX-NEXT: vpcmpneqd %ymm4, %ymm3, %k1
; SKX-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -826,7 +826,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vsubps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1}
@@ -841,7 +841,7 @@ define <16 x float> @test_mask_vsubps(<16 x float> %dst, <16 x float> %i,
define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
; CHECK-LABEL: test_mask_vdivps:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; CHECK-NEXT: vdivps %zmm2, %zmm1, %zmm0 {%k1}
@@ -856,7 +856,7 @@ define <16 x float> @test_mask_vdivps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vpcmpneqq %zmm4, %zmm3, %k1
; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -871,7 +871,7 @@ define <8 x double> @test_mask_vaddpd(<8 x double> %dst, <8 x double> %i,
define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
; CHECK-LABEL: test_maskz_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0 {%k1} {z}
@@ -885,7 +885,7 @@ define <8 x double> @test_maskz_vaddpd(<8 x double> %i, <8 x double> %j,
define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_fold_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm3, %zmm3, %zmm3
; CHECK-NEXT: vpcmpneqq %zmm3, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm1, %zmm0 {%k1}
@@ -901,7 +901,7 @@ define <8 x double> @test_mask_fold_vaddpd(<8 x double> %dst, <8 x double> %i,
define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
; CHECK-LABEL: test_maskz_fold_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi), %zmm0, %zmm0 {%k1} {z}
@@ -916,7 +916,7 @@ define <8 x double> @test_maskz_fold_vaddpd(<8 x double> %i, <8 x double>* %j,
define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%tmp = load double, double* %j
@@ -929,7 +929,7 @@ define <8 x double> @test_broadcast_vaddpd(<8 x double> %i, double* %j) nounwind
define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double> %i,
; CHECK-LABEL: test_mask_broadcast_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm0, %zmm0, %zmm0
; CHECK-NEXT: vpcmpneqq %zmm0, %zmm2, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm1, %zmm1 {%k1}
@@ -948,7 +948,7 @@ define <8 x double> @test_mask_broadcast_vaddpd(<8 x double> %dst, <8 x double>
define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
; CHECK-LABEL: test_maskz_broadcast_vaddpd:
-; CHECK: ## BB#0:
+; CHECK: # BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1
; CHECK-NEXT: vaddpd (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
@@ -966,27 +966,27 @@ define <8 x double> @test_maskz_broadcast_vaddpd(<8 x double> %i, double* %j,
define <16 x float> @test_fxor(<16 x float> %a) {
; AVX512F-LABEL: test_fxor:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
@@ -996,30 +996,30 @@ define <16 x float> @test_fxor(<16 x float> %a) {
define <8 x float> @test_fxor_8f32(<8 x float> %a) {
; AVX512F-LABEL: test_fxor_8f32:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX512F-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: test_fxor_8f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpxord {{.*}}(%rip){1to8}, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: test_fxor_8f32:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX512BW-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_fxor_8f32:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vbroadcastss {{.*}}(%rip), %ymm1
; AVX512DQ-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: test_fxor_8f32:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vxorps {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT: retq
%res = fsub <8 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
@@ -1028,27 +1028,27 @@ define <8 x float> @test_fxor_8f32(<8 x float> %a) {
define <8 x double> @fabs_v8f64(<8 x double> %p)
; AVX512F-LABEL: fabs_v8f64:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v8f64:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v8f64:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v8f64:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v8f64:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT: retq
{
@@ -1059,27 +1059,27 @@ declare <8 x double> @llvm.fabs.v8f64(<8 x double> %p)
define <16 x float> @fabs_v16f32(<16 x float> %p)
; AVX512F-LABEL: fabs_v16f32:
-; AVX512F: ## BB#0:
+; AVX512F: # BB#0:
; AVX512F-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fabs_v16f32:
-; AVX512VL: ## BB#0:
+; AVX512VL: # BB#0:
; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: fabs_v16f32:
-; AVX512BW: ## BB#0:
+; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: fabs_v16f32:
-; AVX512DQ: ## BB#0:
+; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: fabs_v16f32:
-; SKX: ## BB#0:
+; SKX: # BB#0:
; SKX-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0
; SKX-NEXT: retq
{
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 300cb51f871c..edcc3933bc39 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -1004,8 +1004,6 @@ define i8 @test_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1018,8 +1016,6 @@ define i8 @test_mask_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1058,8 +1054,6 @@ define i8 @test_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1072,8 +1066,6 @@ define i8 @test_mask_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1087,8 +1079,6 @@ define i8 @test_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_pcmpeq_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1101,8 +1091,6 @@ define i8 @test_mask_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1116,10 +1104,6 @@ define i8 @test_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1132,10 +1116,6 @@ define i8 @test_mask_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1149,8 +1129,6 @@ define i8 @test_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: test_pcmpgt_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1163,8 +1141,6 @@ define i8 @test_mask_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1178,10 +1154,6 @@ define i8 @test_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -1194,10 +1166,6 @@ define i8 @test_mask_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK: ## BB#0:
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
@@ -5164,23 +5132,11 @@ define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k1 ## encoding: [0x62,0xf2,0xf5,0x28,0x37,0xc8]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k4 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xe0,0x02]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xe9]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5219,43 +5175,31 @@ define <8 x i8> @test_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x29,0x37,0xd0]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k6 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x1f,0xf0,0x02]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x29,0xc1]
+; CHECK-NEXT: vpcmpgtq %ymm0, %ymm1, %k7 {%k6} ## encoding: [0x62,0xf2,0xf5,0x2e,0x37,0xf8]
+; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k1 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1f,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k4 {%k6} ## encoding: [0x62,0xf3,0xf5,0x2e,0x1f,0xe0,0x02]
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k5 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x37,0xe9]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xf9]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
@@ -5283,23 +5227,11 @@ define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xc9,0x01]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x28,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe1,0x05]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x28,0x1e,0xe9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5338,43 +5270,31 @@ define <8 x i8> @test_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_256(<4 x i64> %a0, <4 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xd1,0x01]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xf1,0x05]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k6} ## encoding: [0x62,0xf2,0xfd,0x2e,0x29,0xc1]
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k7 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xf9,0x01]
+; CHECK-NEXT: vpcmpleuq %ymm1, %ymm0, %k1 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %ymm1, %ymm0, %k3 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpnltuq %ymm1, %ymm0, %k4 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xe1,0x05]
+; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k5 {%k6} ## encoding: [0x62,0xf3,0xfd,0x2e,0x1e,0xe9,0x06]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x1e,0xf9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.256(<4 x i64> %a0, <4 x i64> %a1, i32 0, i8 %mask)
@@ -5402,23 +5322,11 @@ define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf1,0x75,0x08,0x66,0xc8]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k4 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xe0,0x02]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xe9]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5457,43 +5365,31 @@ define <8 x i8> @test_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_cmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k2 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x66,0xd0]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k6 {%k1} ## encoding: [0x62,0xf3,0x75,0x09,0x1f,0xf0,0x02]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x76,0xc1]
+; CHECK-NEXT: vpcmpgtd %xmm0, %xmm1, %k7 {%k6} ## encoding: [0x62,0xf1,0x75,0x0e,0x66,0xf8]
+; CHECK-NEXT: vpcmpled %xmm1, %xmm0, %k1 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1f,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k4 {%k6} ## encoding: [0x62,0xf3,0x75,0x0e,0x1f,0xe0,0x02]
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k5 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x66,0xe9]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xf9]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
@@ -5521,23 +5417,11 @@ define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xc9,0x01]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0x7d,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe1,0x05]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0x7d,0x08,0x1e,0xe9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5576,43 +5460,31 @@ define <8 x i8> @test_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i8> @test_mask_ucmp_d_128(<4 x i32> %a0, <4 x i32> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xd1,0x01]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xf1,0x05]
+; CHECK-NEXT: kmovw %edi, %k6 ## encoding: [0xc5,0xf8,0x92,0xf7]
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k6} ## encoding: [0x62,0xf1,0x7d,0x0e,0x76,0xc1]
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k7 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xf9,0x01]
+; CHECK-NEXT: vpcmpleud %xmm1, %xmm0, %k1 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqd %xmm1, %xmm0, %k3 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpnltud %xmm1, %xmm0, %k4 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xe1,0x05]
+; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k5 {%k6} ## encoding: [0x62,0xf3,0x7d,0x0e,0x1e,0xe9,0x06]
; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x1e,0xf9,0x06]
-; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.d.128(<4 x i32> %a0, <4 x i32> %a1, i32 0, i8 %mask)
@@ -5640,35 +5512,11 @@ define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k1 ## encoding: [0x62,0xf2,0xf5,0x08,0x37,0xc8]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k4 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xe0,0x02]
-; CHECK-NEXT: kshiftlw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xe9]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5707,57 +5555,33 @@ define <8 x i8> @test_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_cmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k2 {%k1} ## encoding: [0x62,0xf2,0xf5,0x09,0x37,0xd0]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k6 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x1f,0xf0,0x02]
-; CHECK-NEXT: kshiftlw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xf9]
+; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x29,0xc1]
+; CHECK-NEXT: vpcmpgtq %xmm0, %xmm1, %k6 {%k7} ## encoding: [0x62,0xf2,0xf5,0x0f,0x37,0xf0]
+; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k4 {%k7} ## encoding: [0x62,0xf3,0xf5,0x0f,0x1f,0xe0,0x02]
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x37,0xe9]
; CHECK-NEXT: kshiftlw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0e]
; CHECK-NEXT: kshiftrw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0e]
; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.cmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
@@ -5785,35 +5609,11 @@ define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k1 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xc9,0x01]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k2 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xd1,0x02]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 ## encoding: [0x62,0xf3,0xfd,0x08,0x1f,0xd9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k4 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe1,0x05]
-; CHECK-NEXT: kshiftlw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x32,0xe4,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k4, %k4 ## encoding: [0xc4,0xe3,0xf9,0x30,0xe4,0x0c]
; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k5 ## encoding: [0x62,0xf3,0xfd,0x08,0x1e,0xe9,0x06]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc0,0x00]
@@ -5852,57 +5652,33 @@ define <8 x i8> @test_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <8 x i8> @test_mask_ucmp_q_128(<2 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_128:
; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
-; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
-; CHECK-NEXT: kshiftlw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc0,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k0, %k0 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc0,0x0c]
-; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xd1,0x01]
-; CHECK-NEXT: kshiftlw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x32,0xd2,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k2, %k2 ## encoding: [0xc4,0xe3,0xf9,0x30,0xd2,0x0c]
-; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k3 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xd9,0x02]
-; CHECK-NEXT: kshiftlw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x32,0xdb,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k3, %k3 ## encoding: [0xc4,0xe3,0xf9,0x30,0xdb,0x0c]
-; CHECK-NEXT: kxorw %k0, %k0, %k4 ## encoding: [0xc5,0xfc,0x47,0xe0]
-; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k5 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1f,0xe9,0x04]
-; CHECK-NEXT: kshiftlw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x32,0xed,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k5, %k5 ## encoding: [0xc4,0xe3,0xf9,0x30,0xed,0x0c]
-; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k6 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xf1,0x05]
-; CHECK-NEXT: kshiftlw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x32,0xf6,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k6, %k6 ## encoding: [0xc4,0xe3,0xf9,0x30,0xf6,0x0c]
-; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k7 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x1e,0xf9,0x06]
+; CHECK-NEXT: kmovw %edi, %k7 ## encoding: [0xc5,0xf8,0x92,0xff]
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k7} ## encoding: [0x62,0xf2,0xfd,0x0f,0x29,0xc1]
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k6 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xf1,0x01]
+; CHECK-NEXT: vpcmpleuq %xmm1, %xmm0, %k1 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xc9,0x02]
+; CHECK-NEXT: kxorw %k0, %k0, %k2 ## encoding: [0xc5,0xfc,0x47,0xd0]
+; CHECK-NEXT: vpcmpneqq %xmm1, %xmm0, %k3 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1f,0xd9,0x04]
+; CHECK-NEXT: vpcmpnltuq %xmm1, %xmm0, %k4 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xe1,0x05]
+; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k5 {%k7} ## encoding: [0x62,0xf3,0xfd,0x0f,0x1e,0xe9,0x06]
; CHECK-NEXT: kshiftlw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0e]
; CHECK-NEXT: kshiftrw $14, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0e]
; CHECK-NEXT: kshiftlw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x32,0xff,0x0c]
; CHECK-NEXT: kshiftrw $12, %k7, %k7 ## encoding: [0xc4,0xe3,0xf9,0x30,0xff,0x0c]
-; CHECK-NEXT: kshiftlw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0e]
-; CHECK-NEXT: kshiftrw $14, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0e]
-; CHECK-NEXT: kshiftlw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x32,0xc9,0x0c]
-; CHECK-NEXT: kshiftrw $12, %k1, %k1 ## encoding: [0xc4,0xe3,0xf9,0x30,0xc9,0x0c]
-; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
+; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
; CHECK-NEXT: kmovw %k0, %ecx ## encoding: [0xc5,0xf8,0x93,0xc8]
; CHECK-NEXT: vmovd %ecx, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc1]
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x02]
-; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
+; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x04]
-; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
+; CHECK-NEXT: kmovw %k2, %eax ## encoding: [0xc5,0xf8,0x93,0xc2]
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x06]
-; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
+; CHECK-NEXT: kmovw %k3, %eax ## encoding: [0xc5,0xf8,0x93,0xc3]
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x08]
-; CHECK-NEXT: kmovw %k6, %eax ## encoding: [0xc5,0xf8,0x93,0xc6]
+; CHECK-NEXT: kmovw %k4, %eax ## encoding: [0xc5,0xf8,0x93,0xc4]
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0a]
-; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
+; CHECK-NEXT: kmovw %k5, %eax ## encoding: [0xc5,0xf8,0x93,0xc5]
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0c]
-; CHECK-NEXT: kmovw %k1, %eax ## encoding: [0xc5,0xf8,0x93,0xc1]
+; CHECK-NEXT: kmovw %k7, %eax ## encoding: [0xc5,0xf8,0x93,0xc7]
; CHECK-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x20,0xc0,0x0e]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.128(<2 x i64> %a0, <2 x i64> %a1, i32 0, i8 %mask)
diff --git a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
new file mode 100644
index 000000000000..f297fc3db95f
--- /dev/null
+++ b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -0,0 +1,13485 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s -check-prefix=NoVLX
+
+define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp eq <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqb (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqb (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp eq <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp eq <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp eq <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp eq <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i4 @test_vpcmpeqq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp eq <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp eq <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sgt <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtb (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtb (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sgt <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sgt <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sgt <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sgt <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+; <16 x i32> sgt compare; <16 x i1> result zero-extended to i64.
+; Expected codegen: vpcmpgtd %zmm1, %zmm0 then kmovq of the mask register.
+define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Same as the register variant but the rhs vector is loaded from memory;
+; the load is expected to fold into vpcmpgtd (%rdi).
+define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Masked variant: the i16 mask %__u is ANDed with the <16 x i1> compare
+; result; expected codegen uses vpcmpgtd ... {%k1} with the mask in %k1.
+define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+; Masked variant with the rhs loaded from memory; both the load and the
+; i16 mask are expected to fold into vpcmpgtd (%rsi), %zmm0, %k0 {%k1}.
+define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+; Broadcast variant: the rhs is a scalar i32 splatted to all 16 lanes;
+; expected codegen uses the embedded-broadcast form vpcmpgtd (%rdi){1to16}.
+define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Masked + broadcast variant: i16 mask %__u gates a compare against a
+; scalar broadcast rhs; expect vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}.
+define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtd (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+; <2 x i64> sgt compare zero-extended to an i4 mask; the i4 return is
+; materialized through a stack spill (kmovb to stack + movzbl reload).
+define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+; Memory-rhs variant of the v2i1->v4i1 case; load folds into vpcmpgtq (%rdi).
+define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+; Masked variant: only the low 2 bits of the i8 mask %__u are used
+; (%extract.i); expected codegen is vpcmpgtq %xmm1, %xmm0, %k0 {%k1}.
+define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+; Masked + memory-rhs variant; load and low-2-bit mask both fold into
+; vpcmpgtq (%rsi), %xmm0, %k0 {%k1}.
+define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+; Broadcast variant: scalar i64 rhs splatted to both lanes; expect the
+; embedded-broadcast form vpcmpgtq (%rdi){1to2}.
+define zeroext i4 @test_vpcmpsgtq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+; Masked + broadcast variant (note the AND operands are swapped relative to
+; the _mask/_mask_mem siblings — the combine must handle both orders).
+define zeroext i4 @test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+; <2 x i64> sgt compare zero-extended to i8; returned directly via kmovb.
+define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+; Memory-rhs variant of the v2i1->v8i1 case; load folds into vpcmpgtq (%rdi).
+define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+; Masked v2i1->v8i1: low 2 bits of %__u gate the compare via {%k1}.
+define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+; Masked + memory-rhs v2i1->v8i1; expect vpcmpgtq (%rsi), %xmm0, %k0 {%k1}.
+define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+; Broadcast-rhs v2i1->v8i1; expect vpcmpgtq (%rdi){1to2}, %xmm0, %k0.
+define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+; Masked + broadcast v2i1->v8i1 (AND operands in mask-first order here).
+define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+; <2 x i64> sgt compare zero-extended to i16; returned via kmovw.
+define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Memory-rhs variant of the v2i1->v16i1 case; load folds into vpcmpgtq (%rdi).
+define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Masked v2i1->v16i1: low 2 bits of %__u gate the compare via {%k1}.
+define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+; Masked + memory-rhs v2i1->v16i1; expect vpcmpgtq (%rsi), %xmm0, %k0 {%k1}.
+define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+; Broadcast-rhs v2i1->v16i1; expect vpcmpgtq (%rdi){1to2}, %xmm0, %k0.
+define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Masked + broadcast v2i1->v16i1 (AND operands in mask-first order here).
+define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+; <2 x i64> sgt compare zero-extended to i32; returned via kmovd.
+define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Memory-rhs variant of the v2i1->v32i1 case; load folds into vpcmpgtq (%rdi).
+define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Masked v2i1->v32i1: low 2 bits of %__u gate the compare via {%k1}.
+define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+; Masked + memory-rhs v2i1->v32i1; expect vpcmpgtq (%rsi), %xmm0, %k0 {%k1}.
+define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+; Broadcast-rhs v2i1->v32i1; expect vpcmpgtq (%rdi){1to2}, %xmm0, %k0.
+define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Masked + broadcast v2i1->v32i1 (AND operands in mask-first order here).
+define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+; <2 x i64> sgt compare zero-extended to i64; returned via kmovq.
+define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Memory-rhs variant of the v2i1->v64i1 case; load folds into vpcmpgtq (%rdi).
+define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Masked v2i1->v64i1: low 2 bits of %__u gate the compare via {%k1}.
+define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+; Masked + memory-rhs v2i1->v64i1; expect vpcmpgtq (%rsi), %xmm0, %k0 {%k1}.
+define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+; Broadcast-rhs v2i1->v64i1; expect vpcmpgtq (%rdi){1to2}, %xmm0, %k0.
+define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Masked + broadcast v2i1->v64i1 (AND operands in mask-first order here).
+define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sgt <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sgt <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltb (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleb %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltb (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp sge <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleb %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltb (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleb %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltb (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp sge <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgew_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; masked 8 x i16 signed-ge compare, k-mask zero-extended to i64
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1 ; a >= b, emitted as swapped-operand 'le' (vpcmplew above)
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3 ; apply write-mask %__u (the {%k1} predicate)
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> ; upper 56 lanes come from zeroinitializer: zero-extend the mask
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; same as above but RHS folded from memory (vpcmpnltw)
+; CHECK-LABEL: test_masked_vpcmpsgew_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp sge <8 x i16> %0, %1 ; a >= mem, kept unswapped as 'not-less-than' to fold the load
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3 ; apply write-mask %__u
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> ; zero-extend the mask
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr { ; 16 x i16 signed-ge compare, k-mask zero-extended to i32
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> ; concat with 16 zero lanes
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr { ; RHS folded from memory (vpcmpnltw)
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> ; concat with 16 zero lanes
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr { ; masked variant: %__u gates the compare result
+; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3 ; apply write-mask %__u (the {%k1} predicate)
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> ; concat with 16 zero lanes
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr { ; masked + memory-folded RHS (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3 ; apply write-mask %__u
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> ; concat with 16 zero lanes
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr { ; 16 x i16 signed-ge compare, k-mask zero-extended to i64
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> ; upper 48 lanes come from zeroinitializer: zero-extend the mask
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr { ; RHS folded from memory (vpcmpnltw)
+; CHECK-LABEL: test_vpcmpsgew_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> ; zero-extend the mask
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr { ; masked variant: %__u gates the compare result
+; CHECK-LABEL: test_masked_vpcmpsgew_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp sge <16 x i16> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3 ; apply write-mask %__u (the {%k1} predicate)
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31> ; zero-extend the mask
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64>* %__b) local_unnamed_addr is not the signature — see below
+
+
+define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr { ; 32 x i16 signed-ge compare (zmm), k-mask zero-extended to i64
+; CHECK-LABEL: test_vpcmpsgew_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> ; concat with 32 zero lanes
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr { ; RHS folded from memory (vpcmpnltw)
+; CHECK-LABEL: test_vpcmpsgew_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> ; concat with 32 zero lanes
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr { ; masked variant: %__u gates the compare result
+; CHECK-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3 ; apply write-mask %__u (the {%k1} predicate)
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> ; concat with 32 zero lanes
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr { ; masked + memory-folded RHS (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsgew_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp sge <32 x i16> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3 ; apply write-mask %__u
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63> ; concat with 32 zero lanes
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; 4 x i32 signed-ge compare, k-mask zero-extended to i8
+; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; concat with 4 zero lanes
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; RHS folded from memory (vpcmpnltd)
+; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; concat with 4 zero lanes
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; masked variant: low 4 bits of %__u gate the compare
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask (the {%k1} predicate)
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; concat with 4 zero lanes
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; masked + memory-folded RHS (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; concat with 4 zero lanes
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr { ; RHS is a scalar load splatted to all lanes (vpbroadcastd)
+; CHECK-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0> ; splat the loaded scalar (vpbroadcastd above)
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= splat, emitted as swapped-operand 'le'
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; concat with 4 zero lanes
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr { ; masked broadcast variant (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0> ; splat the loaded scalar
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= splat, emitted as swapped-operand 'le'
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %extract.i, %2 ; apply write-mask (operands commuted vs the non-_b variants)
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; concat with 4 zero lanes
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; 4 x i32 signed-ge compare, k-mask zero-extended to i16
+; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; upper 12 lanes come from zeroinitializer: zero-extend the mask
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; RHS folded from memory (vpcmpnltd)
+; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; masked variant: low 4 bits of %__u gate the compare
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask (the {%k1} predicate)
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; masked + memory-folded RHS (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr { ; RHS is a scalar load splatted to all lanes (vpbroadcastd)
+; CHECK-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0> ; splat the loaded scalar (vpbroadcastd above)
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= splat, emitted as swapped-operand 'le'
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr { ; masked broadcast variant (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0> ; splat the loaded scalar
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= splat, emitted as swapped-operand 'le'
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %extract.i, %2 ; apply write-mask (operands commuted vs the non-_b variants)
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; 4 x i32 signed-ge compare, k-mask zero-extended to i32
+; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; upper 28 lanes come from zeroinitializer: zero-extend the mask
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; RHS folded from memory (vpcmpnltd)
+; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; masked variant: low 4 bits of %__u gate the compare
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask (the {%k1} predicate)
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; masked + memory-folded RHS (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr { ; RHS is a scalar load splatted to all lanes (vpbroadcastd)
+; CHECK-LABEL: test_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0> ; splat the loaded scalar (vpbroadcastd above)
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= splat, emitted as swapped-operand 'le'
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr { ; masked broadcast variant (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0> ; splat the loaded scalar
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= splat, emitted as swapped-operand 'le'
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %extract.i, %2 ; apply write-mask (operands commuted vs the non-_b variants)
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; 4 x i32 signed-ge compare, k-mask zero-extended to i64
+; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; upper 60 lanes come from zeroinitializer: zero-extend the mask
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; RHS folded from memory (vpcmpnltd)
+; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr { ; masked variant: low 4 bits of %__u gate the compare
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= b, emitted as swapped-operand 'le'
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask (the {%k1} predicate)
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr { ; masked + memory-folded RHS (pointer arrives in rsi)
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp sge <4 x i32> %0, %1 ; a >= mem, kept as 'not-less-than' to fold the load
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; take the low 4 bits of the i8 mask
+ %4 = and <4 x i1> %2, %extract.i ; apply write-mask
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7> ; zero-extend the mask
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %xmm1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %ymm1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltd (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rdi), %zmm1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastd (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i4 @test_vpcmpsgeq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %xmm1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %xmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp sge <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %ymm1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %ymm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rdi), %zmm1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpbroadcastq (%rsi), %zmm1
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp sge <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %1 = bitcast <2 x i64> %__b to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask_mem(i16 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <16 x i8>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <16 x i8>
+ %2 = icmp ult <16 x i8> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltub (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %1 = bitcast <4 x i64> %__b to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultb_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltub (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <32 x i8>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <32 x i8>
+ %2 = icmp ult <32 x i8> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %1 = bitcast <2 x i64> %__b to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <8 x i16>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <8 x i16>
+ %2 = icmp ult <8 x i16> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %1 = bitcast <4 x i64> %__b to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask_mem(i16 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <16 x i16>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <16 x i16>
+ %2 = icmp ult <16 x i16> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuw (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = shufflevector <32 x i1> %2, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %1 = bitcast <8 x i64> %__b to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultw_v32i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuw (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <32 x i16>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <32 x i16>
+ %2 = icmp ult <32 x i16> %0, %1
+ %3 = bitcast i32 %__u to <32 x i1>
+ %4 = and <32 x i1> %2, %3
+ %5 = shufflevector <32 x i1> %4, <32 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %1 = bitcast <2 x i64> %__b to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x i32>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <4 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
+; NoVLX-NEXT: kmovw %edi, %k1
+; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %1 = bitcast <4 x i64> %__b to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x i32>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <8 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i32> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %1 = bitcast <8 x i64> %__b to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem(i16 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x i32>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %2, %3
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltud (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u, <8 x i64> %__a, i32* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltud (%rsi){1to16}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x i32>
+ %load = load i32, i32* %__b
+ %vec = insertelement <16 x i32> undef, i32 %load, i32 0
+ %1 = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <16 x i32> %0, %1
+ %3 = bitcast i16 %__u to <16 x i1>
+ %4 = and <16 x i1> %3, %2
+ %5 = shufflevector <16 x i1> %4, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i4 @test_vpcmpultq_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %6 = bitcast <4 x i1> %5 to i4
+ ret i4 %6
+}
+
+
+define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %1 = bitcast <2 x i64> %__b to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x i64>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %2, %extract.i
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, <2 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <2 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> <i32 0, i32 0>
+ %2 = icmp ult <2 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %4 = and <2 x i1> %extract.i, %2
+ %5 = shufflevector <2 x i1> %4, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+; Unsigned <4 x i64> compare (icmp ult) lowered to VPCMPLTUQ on %ymm registers;
+; the v4i1 result is zero-padded (shufflevector with zeroinitializer) into a
+; v8i1 and returned as an i8 mask.
+;
+; Memory-operand variant: the RHS <4 x i64> is loaded from %__b and folded
+; into vpcmpltuq's memory operand.
+define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+; Write-masked variant: the low 4 bits of %__u (moved into %k1) are ANDed with
+; the compare result, expected to fold into vpcmpltuq's {%k1} write-mask.
+define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+; Write-masked compare with a memory operand.
+define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+; Broadcast variant: a scalar i64 loaded from %__b and splatted to <4 x i64>
+; should fold as an embedded broadcast, (%rdi){1to4}.
+define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+; Write-masked + embedded-broadcast variant (note the AND operands are swapped
+; relative to the plain masked test; the fold must be commutative).
+define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <8 x i1> %5 to i8
+ ret i8 %6
+}
+
+
+; Same VPCMPLTUQ (%ymm) compare family, but the v4i1 result is widened to a
+; v16i1 (i16) mask; the mask register is read back with kmovw.
+define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Memory-operand variant.
+define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Write-masked variant (low 4 bits of %__u via %k1).
+define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+; Write-masked compare with a memory operand.
+define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+; Embedded-broadcast variant, (%rdi){1to4}.
+define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Write-masked + embedded-broadcast variant (AND operands swapped; fold must
+; be commutative).
+define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+; Same VPCMPLTUQ (%ymm) compare family, result widened to a v32i1 (i32) mask;
+; read back with kmovd.
+define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Memory-operand variant.
+define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Write-masked variant (low 4 bits of %__u via %k1).
+define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+; Write-masked compare with a memory operand.
+define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+; Embedded-broadcast variant, (%rdi){1to4}.
+define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Write-masked + embedded-broadcast variant (AND operands swapped; fold must
+; be commutative).
+define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+; Same VPCMPLTUQ (%ymm) compare family, result widened to a v64i1 (i64) mask;
+; read back with kmovq into %rax.
+define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Memory-operand variant.
+define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Write-masked variant (low 4 bits of %__u via %k1).
+define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %1 = bitcast <4 x i64> %__b to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+; Write-masked compare with a memory operand.
+define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x i64>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %2, %extract.i
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+; Embedded-broadcast variant, (%rdi){1to4}.
+define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Write-masked + embedded-broadcast variant (AND operands swapped; fold must
+; be commutative).
+define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, <4 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <4 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <4 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = and <4 x i1> %extract.i, %2
+ %5 = shufflevector <4 x i1> %4, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+; 512-bit variants: unsigned <8 x i64> compare lowered to VPCMPLTUQ on %zmm
+; registers; the v8i1 result is zero-padded to v16i1 and returned as an i16.
+; The write-masked variants use the full 8 bits of %__u (no extract needed).
+define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Memory-operand variant.
+define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Write-masked variant: %__u as an 8-bit write-mask in %k1.
+define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+; Write-masked compare with a memory operand.
+define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+; Embedded-broadcast variant, (%rdi){1to8}.
+define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+; Write-masked + embedded-broadcast variant (AND operands swapped; fold must
+; be commutative).
+define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <16 x i1> %5 to i16
+ ret i16 %6
+}
+
+
+; VPCMPLTUQ (%zmm) compare family, v8i1 result widened to a v32i1 (i32) mask;
+; read back with kmovd.
+define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Memory-operand variant.
+define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Write-masked variant: %__u as an 8-bit write-mask in %k1.
+define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+; Write-masked compare with a memory operand.
+define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+; Embedded-broadcast variant, (%rdi){1to8}.
+define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+; Write-masked + embedded-broadcast variant (AND operands swapped; fold must
+; be commutative).
+define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <32 x i1> %5 to i32
+ ret i32 %6
+}
+
+
+; VPCMPLTUQ (%zmm) compare family, v8i1 result widened to a v64i1 (i64) mask;
+; read back with kmovq into %rax.
+define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+; Memory-operand variant.
+define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %1 = bitcast <8 x i64> %__b to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x i64>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %2, %3
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, <8 x i64> %__a, i64* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: kmovd %edi, %k1
+; CHECK-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x i64>
+ %load = load i64, i64* %__b
+ %vec = insertelement <8 x i64> undef, i64 %load, i32 0
+ %1 = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = icmp ult <8 x i64> %0, %1
+ %3 = bitcast i8 %__u to <8 x i1>
+ %4 = and <8 x i1> %3, %2
+ %5 = shufflevector <8 x i1> %4, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %6 = bitcast <64 x i1> %5 to i64
+ ret i64 %6
+}
+
+
+declare i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> , <16 x float> , i32, i16, i32)
+define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %1 = bitcast <2 x i64> %__b to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <4 x float>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <4 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <4 x float> undef, float %load, i32 0
+ %1 = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x float> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %1 = bitcast <4 x i64> %__b to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vmovaps (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+;
+; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
+; NoVLX: ## BB#0: ## %entry
+; NoVLX-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
+; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; NoVLX-NEXT: kshiftlw $8, %k0, %k0
+; NoVLX-NEXT: kshiftrw $8, %k0, %k0
+; NoVLX-NEXT: kmovw %k0, %eax
+; NoVLX-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <8 x float> undef, float %load, i32 0
+ %1 = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %1 = bitcast <4 x i64> %__b to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <8 x float> undef, float %load, i32 0
+ %1 = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %1 = bitcast <4 x i64> %__b to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <8 x float>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <8 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <8 x float> undef, float %load, i32 0
+ %1 = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x float> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <16 x float> undef, float %load, i32 0
+ %1 = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v32i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %0, <16 x float> %1, i32 2, i16 -1, i32 8)
+ %3 = zext i16 %2 to i32
+ ret i32 %3
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <16 x float>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, float* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqps (%rdi){1to16}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %load = load float, float* %__b
+ %vec = insertelement <16 x float> undef, float %load, i32 0
+ %1 = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <16 x float> %0, %1
+ %3 = shufflevector <16 x i1> %2, <16 x i1> zeroinitializer, <64 x i32> <i32 0,i32 1,i32 2,i32 3,i32 4,i32 5,i32 6,i32 7,i32 8,i32 9,i32 10,i32 11,i32 12,i32 13,i32 14,i32 15,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31,i32 16,i32 17,i32 18,i32 19,i32 20,i32 21,i32 22,i32 23,i32 24,i32 25,i32 26,i32 27,i32 28,i32 29,i32 30,i32 31>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqps_v16i1_v64i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: movzwl %ax, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <16 x float>
+ %1 = bitcast <8 x i64> %__b to <16 x float>
+ %2 = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %0, <16 x float> %1, i32 2, i16 -1, i32 8)
+ %3 = zext i16 %2 to i64
+ ret i64 %3
+}
+
+
+declare i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> , <8 x double> , i32, i8, i32)
+define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+define zeroext i4 @test_vcmpoeqpd_v2i1_v4i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v4i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %4 = bitcast <4 x i1> %3 to i4
+ ret i4 %4
+}
+
+
+define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %1 = bitcast <2 x i64> %__b to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load <2 x i64>, <2 x i64>* %__b
+ %1 = bitcast <2 x i64> %load to <2 x double>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v2i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <2 x i64> %__a to <2 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <2 x double> undef, double %load, i32 0
+ %1 = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> <i32 0, i32 0>
+ %2 = fcmp oeq <2 x double> %0, %1
+ %3 = shufflevector <2 x i1> %2, <2 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v8i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <8 x i1> %3 to i8
+ ret i8 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %1 = bitcast <4 x i64> %__b to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load <4 x i64>, <4 x i64>* %__b
+ %1 = bitcast <4 x i64> %load to <4 x double>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v4i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <4 x i64> %__a to <4 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <4 x double> undef, double %load, i32 0
+ %1 = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <4 x double> %0, %1
+ %3 = shufflevector <4 x i1> %2, <4 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <8 x double> undef, double %load, i32 0
+ %1 = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <16 x i1> %3 to i16
+ ret i16 %4
+}
+
+
+define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v16i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %0, <8 x double> %1, i32 2, i8 -1, i32 8)
+ %3 = zext i8 %2 to i16
+ ret i16 %3
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <8 x double> undef, double %load, i32 0
+ %1 = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <32 x i1> %3 to i32
+ ret i32 %4
+}
+
+
+define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v32i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovb %k0, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %0, <8 x double> %1, i32 2, i8 -1, i32 8)
+ %3 = zext i8 %2 to i32
+ ret i32 %3
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load <8 x i64>, <8 x i64>* %__b
+ %1 = bitcast <8 x i64> %load to <8 x double>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, double* %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_mask_mem_b:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, %rax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %load = load double, double* %__b
+ %vec = insertelement <8 x double> undef, double %load, i32 0
+ %1 = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
+ %2 = fcmp oeq <8 x double> %0, %1
+ %3 = shufflevector <8 x i1> %2, <8 x i1> zeroinitializer, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = bitcast <64 x i1> %3 to i64
+ ret i64 %4
+}
+
+
+define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_sae_mask(<8 x i64> %__a, <8 x i64> %__b) local_unnamed_addr {
+; CHECK-LABEL: test_vcmpoeqpd_v8i1_v64i1_sae_mask:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, %eax
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+entry:
+ %0 = bitcast <8 x i64> %__a to <8 x double>
+ %1 = bitcast <8 x i64> %__b to <8 x double>
+ %2 = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %0, <8 x double> %1, i32 2, i8 -1, i32 8)
+ %3 = zext i8 %2 to i64
+ ret i64 %3
+}
+
+
+
diff --git a/test/CodeGen/X86/bswap-vector.ll b/test/CodeGen/X86/bswap-vector.ll
index fd5983df8325..7463f5f6d086 100644
--- a/test/CodeGen/X86/bswap-vector.ll
+++ b/test/CodeGen/X86/bswap-vector.ll
@@ -1,11 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mcpu=x86-64 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-NOSSSE3
-; RUN: llc < %s -mcpu=core2 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-SSSE3
-; RUN: llc < %s -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-AVX --check-prefix=CHECK-AVX2
-; RUN: llc < %s -mcpu=core-avx2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-WIDE-AVX --check-prefix=CHECK-WIDE-AVX2
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-NOSSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-SSE --check-prefix=CHECK-SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-AVX --check-prefix=CHECK-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-ALL --check-prefix=CHECK-WIDE-AVX --check-prefix=CHECK-WIDE-AVX2
declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
diff --git a/test/CodeGen/X86/bswap-wide-int.ll b/test/CodeGen/X86/bswap-wide-int.ll
new file mode 100644
index 000000000000..db48eb80de4b
--- /dev/null
+++ b/test/CodeGen/X86/bswap-wide-int.ll
@@ -0,0 +1,173 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+movbe | FileCheck %s --check-prefix=X86-MOVBE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+movbe | FileCheck %s --check-prefix=X64-MOVBE
+
+declare i64 @llvm.bswap.i64(i64)
+declare i128 @llvm.bswap.i128(i128)
+declare i256 @llvm.bswap.i256(i256)
+
+define i64 @bswap_i64(i64 %a0) nounwind {
+; X86-LABEL: bswap_i64:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: bswapl %eax
+; X86-NEXT: bswapl %edx
+; X86-NEXT: retl
+;
+; X86-MOVBE-LABEL: bswap_i64:
+; X86-MOVBE: # BB#0:
+; X86-MOVBE-NEXT: movbel {{[0-9]+}}(%esp), %eax
+; X86-MOVBE-NEXT: movbel {{[0-9]+}}(%esp), %edx
+; X86-MOVBE-NEXT: retl
+;
+; X64-LABEL: bswap_i64:
+; X64: # BB#0:
+; X64-NEXT: bswapq %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+;
+; X64-MOVBE-LABEL: bswap_i64:
+; X64-MOVBE: # BB#0:
+; X64-MOVBE-NEXT: bswapq %rdi
+; X64-MOVBE-NEXT: movq %rdi, %rax
+; X64-MOVBE-NEXT: retq
+ %1 = call i64 @llvm.bswap.i64(i64 %a0)
+ ret i64 %1
+}
+
+define i128 @bswap_i128(i128 %a0) nounwind {
+; X86-LABEL: bswap_i128:
+; X86: # BB#0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: bswapl %edi
+; X86-NEXT: bswapl %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl %edx, 8(%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edi, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
+;
+; X86-MOVBE-LABEL: bswap_i128:
+; X86-MOVBE: # BB#0:
+; X86-MOVBE-NEXT: pushl %edi
+; X86-MOVBE-NEXT: pushl %esi
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-MOVBE-NEXT: movbel %esi, 12(%eax)
+; X86-MOVBE-NEXT: movbel %edi, 8(%eax)
+; X86-MOVBE-NEXT: movbel %ecx, 4(%eax)
+; X86-MOVBE-NEXT: movbel %edx, (%eax)
+; X86-MOVBE-NEXT: popl %esi
+; X86-MOVBE-NEXT: popl %edi
+; X86-MOVBE-NEXT: retl $4
+;
+; X64-LABEL: bswap_i128:
+; X64: # BB#0:
+; X64-NEXT: bswapq %rsi
+; X64-NEXT: bswapq %rdi
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rdi, %rdx
+; X64-NEXT: retq
+;
+; X64-MOVBE-LABEL: bswap_i128:
+; X64-MOVBE: # BB#0:
+; X64-MOVBE-NEXT: bswapq %rsi
+; X64-MOVBE-NEXT: bswapq %rdi
+; X64-MOVBE-NEXT: movq %rsi, %rax
+; X64-MOVBE-NEXT: movq %rdi, %rdx
+; X64-MOVBE-NEXT: retq
+ %1 = call i128 @llvm.bswap.i128(i128 %a0)
+ ret i128 %1
+}
+
+define i256 @bswap_i256(i256 %a0) nounwind {
+; X86-LABEL: bswap_i256:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 28(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 24(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 20(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 8(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: retl $4
+;
+; X86-MOVBE-LABEL: bswap_i256:
+; X86-MOVBE: # BB#0:
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 28(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 24(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 20(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 16(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 12(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 8(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, 4(%eax)
+; X86-MOVBE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-MOVBE-NEXT: movbel %ecx, (%eax)
+; X86-MOVBE-NEXT: retl $4
+;
+; X64-LABEL: bswap_i256:
+; X64: # BB#0:
+; X64-NEXT: bswapq %r8
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: bswapq %rsi
+; X64-NEXT: movq %rsi, 24(%rdi)
+; X64-NEXT: movq %rdx, 16(%rdi)
+; X64-NEXT: movq %rcx, 8(%rdi)
+; X64-NEXT: movq %r8, (%rdi)
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: retq
+;
+; X64-MOVBE-LABEL: bswap_i256:
+; X64-MOVBE: # BB#0:
+; X64-MOVBE-NEXT: movbeq %rsi, 24(%rdi)
+; X64-MOVBE-NEXT: movbeq %rdx, 16(%rdi)
+; X64-MOVBE-NEXT: movbeq %rcx, 8(%rdi)
+; X64-MOVBE-NEXT: movbeq %r8, (%rdi)
+; X64-MOVBE-NEXT: movq %rdi, %rax
+; X64-MOVBE-NEXT: retq
+ %1 = call i256 @llvm.bswap.i256(i256 %a0)
+ ret i256 %1
+}
diff --git a/test/CodeGen/X86/compress_expand.ll b/test/CodeGen/X86/compress_expand.ll
index e09fcf2a336e..f62e18869a98 100644
--- a/test/CodeGen/X86/compress_expand.ll
+++ b/test/CodeGen/X86/compress_expand.ll
@@ -265,9 +265,7 @@ define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger)
; SKX: # BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k0
-; SKX-NEXT: kshiftlb $6, %k0, %k0
-; SKX-NEXT: kshiftrb $6, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
@@ -295,9 +293,7 @@ define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
; SKX: # BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k0
-; SKX-NEXT: kshiftlb $6, %k0, %k0
-; SKX-NEXT: kshiftrb $6, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm1, %k1
; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
diff --git a/test/CodeGen/X86/cpus.ll b/test/CodeGen/X86/cpus.ll
index 20ce932a184b..7901858cb5dc 100644
--- a/test/CodeGen/X86/cpus.ll
+++ b/test/CodeGen/X86/cpus.ll
@@ -18,6 +18,7 @@
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=broadwell 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=bonnell 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=silvermont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=goldmont 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=k8 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=opteron 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=athlon64 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
@@ -34,3 +35,4 @@
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=btver1 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=btver2 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
; RUN: llc < %s -o /dev/null -mtriple=x86_64-unknown-unknown -mcpu=znver1 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ERROR --allow-empty
+
diff --git a/test/CodeGen/X86/fp128-cast.ll b/test/CodeGen/X86/fp128-cast.ll
index 6568f73029e0..6543292c08b4 100644
--- a/test/CodeGen/X86/fp128-cast.ll
+++ b/test/CodeGen/X86/fp128-cast.ll
@@ -61,10 +61,10 @@ entry:
; X32: retl
;
; X64-LABEL: TestFPToSIF128_I32:
-; X64: movaps vf128(%rip), %xmm0
-; X64-NEXT: callq __fixtfsi
-; X64-NEXT: movl %eax, vi32(%rip)
-; X64: retq
+; X64: movaps vf128(%rip), %xmm0
+; X64-NEXT: callq __fixtfsi
+; X64-NEXT: movl %eax, vi32(%rip)
+; X64: retq
}
define void @TestFPToUIF128_U32() {
@@ -78,10 +78,10 @@ entry:
; X32: retl
;
; X64-LABEL: TestFPToUIF128_U32:
-; X64: movaps vf128(%rip), %xmm0
-; X64-NEXT: callq __fixunstfsi
-; X64-NEXT: movl %eax, vu32(%rip)
-; X64: retq
+; X64: movaps vf128(%rip), %xmm0
+; X64-NEXT: callq __fixunstfsi
+; X64-NEXT: movl %eax, vu32(%rip)
+; X64: retq
}
define void @TestFPToSIF128_I64() {
diff --git a/test/CodeGen/X86/insertelement-zero.ll b/test/CodeGen/X86/insertelement-zero.ll
index e30772b528bc..ea7418f4707e 100644
--- a/test/CodeGen/X86/insertelement-zero.ll
+++ b/test/CodeGen/X86/insertelement-zero.ll
@@ -1,13 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-unknown"
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
; SSE2-LABEL: insert_v2f64_z1:
diff --git a/test/CodeGen/X86/lower-vec-shift.ll b/test/CodeGen/X86/lower-vec-shift.ll
index 783cda0a8dd7..8d64baf5f2a4 100644
--- a/test/CodeGen/X86/lower-vec-shift.ll
+++ b/test/CodeGen/X86/lower-vec-shift.ll
@@ -1,8 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
-
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
; Verify that the following shifts are lowered into a sequence of two shifts plus
; a blend. On pre-avx2 targets, instead of scalarizing logical and arithmetic
diff --git a/test/CodeGen/X86/lower-vec-shuffle-bug.ll b/test/CodeGen/X86/lower-vec-shuffle-bug.ll
index 5918e8045f62..7a081b556867 100644
--- a/test/CodeGen/X86/lower-vec-shuffle-bug.ll
+++ b/test/CodeGen/X86/lower-vec-shuffle-bug.ll
@@ -1,8 +1,9 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
define <4 x double> @test1(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test1:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -12,7 +13,7 @@ entry:
define <4 x double> @test2(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test2:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -22,7 +23,7 @@ entry:
define <4 x double> @test3(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test3:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
@@ -32,7 +33,7 @@ entry:
define <4 x double> @test4(<4 x double> %A, <4 x double> %B) {
; CHECK-LABEL: test4:
-; CHECK: # BB#0:
+; CHECK: # BB#0: # %entry
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index 3c616e8a9f43..7a2e41e10a37 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -462,9 +462,7 @@ define void @test14(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %val) {
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vmovups %xmm1, (%rdi) {%k1}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
@@ -550,9 +548,7 @@ define <2 x float> @test16(<2 x i32> %trigger, <2 x float>* %addr, <2 x float> %
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vblendmps (%rdi), %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
@@ -601,9 +597,7 @@ define <2 x i32> @test17(<2 x i32> %trigger, <2 x i32>* %addr, <2 x i32> %dst) {
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
-; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm2, %xmm0, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SKX-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1}
; SKX-NEXT: vpmovsxdq %xmm0, %xmm0
@@ -645,9 +639,7 @@ define <2 x float> @test18(<2 x i32> %trigger, <2 x float>* %addr) {
; SKX: ## BB#0:
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
-; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
-; SKX-NEXT: kshiftlw $14, %k0, %k0
-; SKX-NEXT: kshiftrw $14, %k0, %k1
+; SKX-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
; SKX-NEXT: vmovups (%rdi), %xmm0 {%k1} {z}
; SKX-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 9d26aee2e8b8..0e09abf73c8c 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=AVX2
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
; This tests codegen time inlining/optimization of memcmp
; rdar://6480398
@@ -11,60 +12,70 @@
declare i32 @memcmp(i8*, i8*, i64)
define i32 @length2(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length2:
-; X32: # BB#0: # %loadbb
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movzwl (%ecx), %ecx
-; X32-NEXT: movzwl (%eax), %eax
-; X32-NEXT: rolw $8, %cx
-; X32-NEXT: rolw $8, %ax
-; X32-NEXT: movzwl %cx, %ecx
-; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: cmpl %eax, %ecx
-; X32-NEXT: je .LBB0_1
-; X32-NEXT: # BB#2: # %res_block
-; X32-NEXT: movl $-1, %eax
-; X32-NEXT: jb .LBB0_4
-; X32-NEXT: # BB#3: # %res_block
-; X32-NEXT: movl $1, %eax
-; X32-NEXT: .LBB0_4: # %endblock
-; X32-NEXT: retl
-; X32-NEXT: .LBB0_1:
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: retl
+; X86-NOSSE-LABEL: length2:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movzwl (%ecx), %ecx
+; X86-NOSSE-NEXT: movzwl (%eax), %eax
+; X86-NOSSE-NEXT: rolw $8, %cx
+; X86-NOSSE-NEXT: rolw $8, %ax
+; X86-NOSSE-NEXT: cmpw %ax, %cx
+; X86-NOSSE-NEXT: movl $-1, %eax
+; X86-NOSSE-NEXT: jae .LBB0_1
+; X86-NOSSE-NEXT: # BB#2:
+; X86-NOSSE-NEXT: je .LBB0_3
+; X86-NOSSE-NEXT: .LBB0_4:
+; X86-NOSSE-NEXT: retl
+; X86-NOSSE-NEXT: .LBB0_1:
+; X86-NOSSE-NEXT: movl $1, %eax
+; X86-NOSSE-NEXT: jne .LBB0_4
+; X86-NOSSE-NEXT: .LBB0_3:
+; X86-NOSSE-NEXT: xorl %eax, %eax
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length2:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movzwl (%ecx), %ecx
+; X86-SSE2-NEXT: movzwl (%eax), %eax
+; X86-SSE2-NEXT: rolw $8, %cx
+; X86-SSE2-NEXT: rolw $8, %ax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: cmpw %ax, %cx
+; X86-SSE2-NEXT: movl $-1, %ecx
+; X86-SSE2-NEXT: movl $1, %eax
+; X86-SSE2-NEXT: cmovbl %ecx, %eax
+; X86-SSE2-NEXT: cmovel %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: length2:
-; X64: # BB#0: # %loadbb
+; X64: # BB#0:
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: movzwl (%rsi), %ecx
; X64-NEXT: rolw $8, %ax
; X64-NEXT: rolw $8, %cx
-; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: movzwl %cx, %ecx
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: je .LBB0_1
-; X64-NEXT: # BB#2: # %res_block
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpw %cx, %ax
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovbl %ecx, %eax
-; X64-NEXT: retq
-; X64-NEXT: .LBB0_1:
-; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
ret i32 %m
}
define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length2_eq:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movzwl (%ecx), %ecx
-; X32-NEXT: cmpw (%eax), %cx
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length2_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: cmpw (%eax), %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length2_eq:
; X64: # BB#0:
@@ -78,13 +89,13 @@ define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
}
define i1 @length2_eq_const(i8* %X) nounwind {
-; X32-LABEL: length2_eq_const:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movzwl (%eax), %eax
-; X32-NEXT: cmpl $12849, %eax # imm = 0x3231
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length2_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length2_eq_const:
; X64: # BB#0:
@@ -98,17 +109,17 @@ define i1 @length2_eq_const(i8* %X) nounwind {
}
define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length2_eq_nobuiltin_attr:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $2
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length2_eq_nobuiltin_attr:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $2
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length2_eq_nobuiltin_attr:
; X64: # BB#0:
@@ -125,15 +136,15 @@ define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
}
define i32 @length3(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length3:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $3
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length3:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $3
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length3:
; X64: # BB#0:
@@ -144,17 +155,17 @@ define i32 @length3(i8* %X, i8* %Y) nounwind {
}
define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length3_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $3
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length3_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $3
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length3_eq:
; X64: # BB#0:
@@ -171,56 +182,70 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
}
define i32 @length4(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length4:
-; X32: # BB#0: # %loadbb
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl (%ecx), %ecx
-; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: bswapl %ecx
-; X32-NEXT: bswapl %eax
-; X32-NEXT: cmpl %eax, %ecx
-; X32-NEXT: je .LBB6_1
-; X32-NEXT: # BB#2: # %res_block
-; X32-NEXT: movl $-1, %eax
-; X32-NEXT: jb .LBB6_4
-; X32-NEXT: # BB#3: # %res_block
-; X32-NEXT: movl $1, %eax
-; X32-NEXT: .LBB6_4: # %endblock
-; X32-NEXT: retl
-; X32-NEXT: .LBB6_1:
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: retl
+; X86-NOSSE-LABEL: length4:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl (%ecx), %ecx
+; X86-NOSSE-NEXT: movl (%eax), %eax
+; X86-NOSSE-NEXT: bswapl %ecx
+; X86-NOSSE-NEXT: bswapl %eax
+; X86-NOSSE-NEXT: cmpl %eax, %ecx
+; X86-NOSSE-NEXT: movl $-1, %eax
+; X86-NOSSE-NEXT: jae .LBB6_1
+; X86-NOSSE-NEXT: # BB#2:
+; X86-NOSSE-NEXT: je .LBB6_3
+; X86-NOSSE-NEXT: .LBB6_4:
+; X86-NOSSE-NEXT: retl
+; X86-NOSSE-NEXT: .LBB6_1:
+; X86-NOSSE-NEXT: movl $1, %eax
+; X86-NOSSE-NEXT: jne .LBB6_4
+; X86-NOSSE-NEXT: .LBB6_3:
+; X86-NOSSE-NEXT: xorl %eax, %eax
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length4:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl (%ecx), %ecx
+; X86-SSE2-NEXT: movl (%eax), %eax
+; X86-SSE2-NEXT: bswapl %ecx
+; X86-SSE2-NEXT: bswapl %eax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: cmpl %eax, %ecx
+; X86-SSE2-NEXT: movl $-1, %ecx
+; X86-SSE2-NEXT: movl $1, %eax
+; X86-SSE2-NEXT: cmovbl %ecx, %eax
+; X86-SSE2-NEXT: cmovel %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-LABEL: length4:
-; X64: # BB#0: # %loadbb
+; X64: # BB#0:
; X64-NEXT: movl (%rdi), %eax
; X64-NEXT: movl (%rsi), %ecx
; X64-NEXT: bswapl %eax
; X64-NEXT: bswapl %ecx
-; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: je .LBB6_1
-; X64-NEXT: # BB#2: # %res_block
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl %ecx, %eax
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovbl %ecx, %eax
-; X64-NEXT: retq
-; X64-NEXT: .LBB6_1:
-; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
ret i32 %m
}
define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length4_eq:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl (%ecx), %ecx
-; X32-NEXT: cmpl (%eax), %ecx
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length4_eq:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: cmpl (%eax), %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length4_eq:
; X64: # BB#0:
@@ -234,12 +259,12 @@ define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
}
define i1 @length4_eq_const(i8* %X) nounwind {
-; X32-LABEL: length4_eq_const:
-; X32: # BB#0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length4_eq_const:
+; X86: # BB#0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length4_eq_const:
; X64: # BB#0:
@@ -252,15 +277,15 @@ define i1 @length4_eq_const(i8* %X) nounwind {
}
define i32 @length5(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length5:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $5
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length5:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $5
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length5:
; X64: # BB#0:
@@ -271,17 +296,17 @@ define i32 @length5(i8* %X, i8* %Y) nounwind {
}
define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length5_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $5
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length5_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $5
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length5_eq:
; X64: # BB#0:
@@ -298,48 +323,45 @@ define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
}
define i32 @length8(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length8:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $8
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length8:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $8
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length8:
-; X64: # BB#0: # %loadbb
+; X64: # BB#0:
; X64-NEXT: movq (%rdi), %rax
; X64-NEXT: movq (%rsi), %rcx
; X64-NEXT: bswapq %rax
; X64-NEXT: bswapq %rcx
+; X64-NEXT: xorl %edx, %edx
; X64-NEXT: cmpq %rcx, %rax
-; X64-NEXT: je .LBB11_1
-; X64-NEXT: # BB#2: # %res_block
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: movl $1, %eax
; X64-NEXT: cmovbl %ecx, %eax
-; X64-NEXT: retq
-; X64-NEXT: .LBB11_1:
-; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmovel %edx, %eax
; X64-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
ret i32 %m
}
define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length8_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $8
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length8_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $8
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length8_eq:
; X64: # BB#0:
@@ -353,17 +375,17 @@ define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
}
define i1 @length8_eq_const(i8* %X) nounwind {
-; X32-LABEL: length8_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $8
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length8_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $8
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length8_eq_const:
; X64: # BB#0:
@@ -377,17 +399,17 @@ define i1 @length8_eq_const(i8* %X) nounwind {
}
define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length12_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $12
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length12_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length12_eq:
; X64: # BB#0:
@@ -404,15 +426,15 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
}
define i32 @length12(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length12:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $12
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length12:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length12:
; X64: # BB#0:
@@ -425,15 +447,15 @@ define i32 @length12(i8* %X, i8* %Y) nounwind {
; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
define i32 @length16(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length16:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $16
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length16:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $16
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length16:
; X64: # BB#0:
@@ -444,86 +466,108 @@ define i32 @length16(i8* %X, i8* %Y) nounwind {
}
define i1 @length16_eq(i8* %x, i8* %y) nounwind {
-; X32-LABEL: length16_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $16
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-NOSSE-LABEL: length16_eq:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu (%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
;
-; SSE2-LABEL: length16_eq:
-; SSE2: # BB#0:
-; SSE2-NEXT: movdqu (%rsi), %xmm0
-; SSE2-NEXT: movdqu (%rdi), %xmm1
-; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
-; SSE2-NEXT: pmovmskb %xmm1, %eax
-; SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; SSE2-NEXT: setne %al
-; SSE2-NEXT: retq
+; X64-SSE2-LABEL: length16_eq:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm0
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
;
-; AVX2-LABEL: length16_eq:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
-; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: retq
+; X64-AVX2-LABEL: length16_eq:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: retq
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
%cmp = icmp ne i32 %call, 0
ret i1 %cmp
}
define i1 @length16_eq_const(i8* %X) nounwind {
-; X32-LABEL: length16_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $16
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-NOSSE-LABEL: length16_eq_const:
+; X86-NOSSE: # BB#0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq_const:
+; X86-SSE2: # BB#0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
;
-; SSE2-LABEL: length16_eq_const:
-; SSE2: # BB#0:
-; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pmovmskb %xmm0, %eax
-; SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; SSE2-NEXT: sete %al
-; SSE2-NEXT: retq
+; X64-SSE2-LABEL: length16_eq_const:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
;
-; AVX2-LABEL: length16_eq_const:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: retq
+; X64-AVX2-LABEL: length16_eq_const:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
%c = icmp eq i32 %m, 0
ret i1 %c
}
define i32 @length32(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length32:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $32
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length32:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length32:
; X64: # BB#0:
@@ -536,90 +580,90 @@ define i32 @length32(i8* %X, i8* %Y) nounwind {
; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
define i1 @length32_eq(i8* %x, i8* %y) nounwind {
-; X32-LABEL: length32_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $32
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length32_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
-; SSE2-LABEL: length32_eq:
-; SSE2: # BB#0:
-; SSE2-NEXT: pushq %rax
-; SSE2-NEXT: movl $32, %edx
-; SSE2-NEXT: callq memcmp
-; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: sete %al
-; SSE2-NEXT: popq %rcx
-; SSE2-NEXT: retq
+; X64-SSE2-LABEL: length32_eq:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $32, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
;
-; AVX2-LABEL: length32_eq:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
-; AVX2-NEXT: vpmovmskb %ymm0, %eax
-; AVX2-NEXT: cmpl $-1, %eax
-; AVX2-NEXT: sete %al
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; X64-AVX2-LABEL: length32_eq:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
%call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
%cmp = icmp eq i32 %call, 0
ret i1 %cmp
}
define i1 @length32_eq_const(i8* %X) nounwind {
-; X32-LABEL: length32_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $32
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length32_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
-; SSE2-LABEL: length32_eq_const:
-; SSE2: # BB#0:
-; SSE2-NEXT: pushq %rax
-; SSE2-NEXT: movl $.L.str, %esi
-; SSE2-NEXT: movl $32, %edx
-; SSE2-NEXT: callq memcmp
-; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: setne %al
-; SSE2-NEXT: popq %rcx
-; SSE2-NEXT: retq
+; X64-SSE2-LABEL: length32_eq_const:
+; X64-SSE2: # BB#0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $.L.str, %esi
+; X64-SSE2-NEXT: movl $32, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
;
-; AVX2-LABEL: length32_eq_const:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpmovmskb %ymm0, %eax
-; AVX2-NEXT: cmpl $-1, %eax
-; AVX2-NEXT: setne %al
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; X64-AVX2-LABEL: length32_eq_const:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
%m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
%c = icmp ne i32 %m, 0
ret i1 %c
}
define i32 @length64(i8* %X, i8* %Y) nounwind {
-; X32-LABEL: length64:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $64
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+; X86-LABEL: length64:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
;
; X64-LABEL: length64:
; X64: # BB#0:
@@ -630,17 +674,17 @@ define i32 @length64(i8* %X, i8* %Y) nounwind {
}
define i1 @length64_eq(i8* %x, i8* %y) nounwind {
-; X32-LABEL: length64_eq:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $64
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: setne %al
-; X32-NEXT: retl
+; X86-LABEL: length64_eq:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
;
; X64-LABEL: length64_eq:
; X64: # BB#0:
@@ -657,17 +701,17 @@ define i1 @length64_eq(i8* %x, i8* %y) nounwind {
}
define i1 @length64_eq_const(i8* %X) nounwind {
-; X32-LABEL: length64_eq_const:
-; X32: # BB#0:
-; X32-NEXT: pushl $0
-; X32-NEXT: pushl $64
-; X32-NEXT: pushl $.L.str
-; X32-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-NEXT: calll memcmp
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: testl %eax, %eax
-; X32-NEXT: sete %al
-; X32-NEXT: retl
+; X86-LABEL: length64_eq_const:
+; X86: # BB#0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
;
; X64-LABEL: length64_eq_const:
; X64: # BB#0:
diff --git a/test/CodeGen/X86/palignr.ll b/test/CodeGen/X86/palignr.ll
index 11e97aadb45d..700c9cf5f3af 100644
--- a/test/CodeGen/X86/palignr.ll
+++ b/test/CodeGen/X86/palignr.ll
@@ -1,132 +1,162 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -march=x86 -mcpu=core2 -mattr=+ssse3 | FileCheck %s
-; RUN: llc < %s -march=x86 -mcpu=yonah | FileCheck --check-prefix=CHECK-YONAH %s
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SSE --check-prefix=CHECK-SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SSE --check-prefix=CHECK-SSSE3
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AVX
define <4 x i32> @test1(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test1:
-; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
-; CHECK-NEXT: retl
+; CHECK-SSE-LABEL: test1:
+; CHECK-SSE: # BB#0:
+; CHECK-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
+; CHECK-SSE-NEXT: retl
;
-; CHECK-YONAH-LABEL: test1:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
-; CHECK-YONAH-NEXT: retl
+; CHECK-AVX-LABEL: test1:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,2,3,0]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> undef, <4 x i32> < i32 1, i32 2, i32 3, i32 0 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test2:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
+; CHECK-SSE2-LABEL: test2:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
+; CHECK-SSE2-NEXT: retl
;
-; CHECK-YONAH-LABEL: test2:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
-; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSSE3-LABEL: test2:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test2:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 3, i32 4 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x i32> @test3(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test3:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
+; CHECK-SSE2-LABEL: test3:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test3:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
;
-; CHECK-YONAH-LABEL: test3:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm1[2,0]
-; CHECK-YONAH-NEXT: retl
+; CHECK-AVX-LABEL: test3:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[4,5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 1, i32 2, i32 undef, i32 4 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
-; CHECK-LABEL: test4:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
-; CHECK-NEXT: retl
+; CHECK-SSE2-LABEL: test4:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
+; CHECK-SSE2-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
;
-; CHECK-YONAH-LABEL: test4:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
-; CHECK-YONAH-NEXT: movapd %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSSE3-LABEL: test4:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test4:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x i32> %A, <4 x i32> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
- ret <4 x i32> %C
+ ret <4 x i32> %C
}
define <4 x float> @test5(<4 x float> %A, <4 x float> %B) nounwind {
-; CHECK-LABEL: test5:
-; CHECK: # BB#0:
-; CHECK-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
-; CHECK-NEXT: movapd %xmm1, %xmm0
-; CHECK-NEXT: retl
+; CHECK-SSE-LABEL: test5:
+; CHECK-SSE: # BB#0:
+; CHECK-SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
+; CHECK-SSE-NEXT: movapd %xmm1, %xmm0
+; CHECK-SSE-NEXT: retl
;
-; CHECK-YONAH-LABEL: test5:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
-; CHECK-YONAH-NEXT: movapd %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-AVX-LABEL: test5:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm1[1],xmm0[0]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <4 x float> %A, <4 x float> %B, <4 x i32> < i32 6, i32 7, i32 undef, i32 1 >
- ret <4 x float> %C
+ ret <4 x float> %C
}
define <8 x i16> @test6(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK-LABEL: test6:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
+; CHECK-SSE2-LABEL: test6:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
+; CHECK-SSE2-NEXT: por %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test6:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
;
-; CHECK-YONAH-LABEL: test6:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5]
-; CHECK-YONAH-NEXT: por %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-AVX-LABEL: test6:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 3, i32 4, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10 >
- ret <8 x i16> %C
+ ret <8 x i16> %C
}
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK-LABEL: test7:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
+; CHECK-SSE2-LABEL: test7:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9]
+; CHECK-SSE2-NEXT: por %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
;
-; CHECK-YONAH-LABEL: test7:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7,8,9]
-; CHECK-YONAH-NEXT: por %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-SSSE3-LABEL: test7:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
+;
+; CHECK-AVX-LABEL: test7:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 undef, i32 6, i32 undef, i32 8, i32 9, i32 10, i32 11, i32 12 >
- ret <8 x i16> %C
+ ret <8 x i16> %C
}
define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind {
-; CHECK-LABEL: test8:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
+; CHECK-SSE2-LABEL: test8:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
+; CHECK-SSE2-NEXT: por %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test8:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
;
-; CHECK-YONAH-LABEL: test8:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
-; CHECK-YONAH-NEXT: por %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-AVX-LABEL: test8:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <16 x i8> %A, <16 x i8> %B, <16 x i32> < i32 5, i32 6, i32 7, i32 undef, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20 >
- ret <16 x i8> %C
+ ret <16 x i8> %C
}
; Check that we don't do unary (circular on single operand) palignr incorrectly.
@@ -134,21 +164,26 @@ define <16 x i8> @test8(<16 x i8> %A, <16 x i8> %B) nounwind {
; incorrectly. In particular, one of the operands of the palignr node
; was an UNDEF.)
define <8 x i16> @test9(<8 x i16> %A, <8 x i16> %B) nounwind {
-; CHECK-LABEL: test9:
-; CHECK: # BB#0:
-; CHECK-NEXT: palignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
-; CHECK-NEXT: retl
+; CHECK-SSE2-LABEL: test9:
+; CHECK-SSE2: # BB#0:
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; CHECK-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
+; CHECK-SSE2-NEXT: por %xmm0, %xmm1
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSE2-NEXT: retl
+;
+; CHECK-SSSE3-LABEL: test9:
+; CHECK-SSSE3: # BB#0:
+; CHECK-SSSE3-NEXT: palignr {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; CHECK-SSSE3-NEXT: movdqa %xmm1, %xmm0
+; CHECK-SSSE3-NEXT: retl
;
-; CHECK-YONAH-LABEL: test9:
-; CHECK-YONAH: # BB#0:
-; CHECK-YONAH-NEXT: movdqa %xmm1, %xmm0
-; CHECK-YONAH-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; CHECK-YONAH-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; CHECK-YONAH-NEXT: por %xmm0, %xmm1
-; CHECK-YONAH-NEXT: movdqa %xmm1, %xmm0
-; CHECK-YONAH-NEXT: retl
+; CHECK-AVX-LABEL: test9:
+; CHECK-AVX: # BB#0:
+; CHECK-AVX-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15,0,1]
+; CHECK-AVX-NEXT: retl
%C = shufflevector <8 x i16> %B, <8 x i16> %A, <8 x i32> < i32 undef, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0 >
- ret <8 x i16> %C
+ ret <8 x i16> %C
}
diff --git a/test/CodeGen/X86/peephole-recurrence.mir b/test/CodeGen/X86/peephole-recurrence.mir
new file mode 100644
index 000000000000..af57a4fd526f
--- /dev/null
+++ b/test/CodeGen/X86/peephole-recurrence.mir
@@ -0,0 +1,232 @@
+# RUN: llc -mtriple=x86_64-- -run-pass=peephole-opt -o - %s | FileCheck %s
+
+--- |
+ define i32 @foo(i32 %a) {
+ bb0:
+ br label %bb1
+
+ bb1: ; preds = %bb7, %bb0
+ %vreg0 = phi i32 [ 0, %bb0 ], [ %vreg3, %bb7 ]
+ %cond0 = icmp eq i32 %a, 0
+ br i1 %cond0, label %bb4, label %bb3
+
+ bb3: ; preds = %bb1
+ br label %bb4
+
+ bb4: ; preds = %bb1, %bb3
+ %vreg5 = phi i32 [ 2, %bb3 ], [ 1, %bb1 ]
+ %cond1 = icmp eq i32 %vreg5, 0
+ br i1 %cond1, label %bb7, label %bb6
+
+ bb6: ; preds = %bb4
+ br label %bb7
+
+ bb7: ; preds = %bb4, %bb6
+ %vreg1 = phi i32 [ 2, %bb6 ], [ 1, %bb4 ]
+ %vreg2 = add i32 %vreg5, %vreg0
+ %vreg3 = add i32 %vreg1, %vreg2
+ %cond2 = icmp slt i32 %vreg3, 10
+ br i1 %cond2, label %bb1, label %bb8
+
+ bb8: ; preds = %bb7
+ ret i32 0
+ }
+
+ define i32 @bar(i32 %a, i32* %p) {
+ bb0:
+ br label %bb1
+
+ bb1: ; preds = %bb7, %bb0
+ %vreg0 = phi i32 [ 0, %bb0 ], [ %vreg3, %bb7 ]
+ %cond0 = icmp eq i32 %a, 0
+ br i1 %cond0, label %bb4, label %bb3
+
+ bb3: ; preds = %bb1
+ br label %bb4
+
+ bb4: ; preds = %bb1, %bb3
+ %vreg5 = phi i32 [ 2, %bb3 ], [ 1, %bb1 ]
+ %cond1 = icmp eq i32 %vreg5, 0
+ br i1 %cond1, label %bb7, label %bb6
+
+ bb6: ; preds = %bb4
+ br label %bb7
+
+ bb7: ; preds = %bb4, %bb6
+ %vreg1 = phi i32 [ 2, %bb6 ], [ 1, %bb4 ]
+ %vreg2 = add i32 %vreg5, %vreg0
+ store i32 %vreg0, i32* %p
+ %vreg3 = add i32 %vreg1, %vreg2
+ %cond2 = icmp slt i32 %vreg3, 10
+ br i1 %cond2, label %bb1, label %bb8
+
+ bb8: ; preds = %bb7
+ ret i32 0
+ }
+
+...
+---
+# There is a recurrence formulated around %0, %10, and %3. Check that operands
+# are commuted for ADD instructions in bb.5.bb7 so that the values involved in
+# the recurrence are tied. This will remove redundant copy instruction.
+name: foo
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32, preferred-register: '' }
+ - { id: 1, class: gr32, preferred-register: '' }
+ - { id: 2, class: gr32, preferred-register: '' }
+ - { id: 3, class: gr32, preferred-register: '' }
+ - { id: 4, class: gr32, preferred-register: '' }
+ - { id: 5, class: gr32, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: gr32, preferred-register: '' }
+ - { id: 11, class: gr32, preferred-register: '' }
+ - { id: 12, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '%edi', virtual-reg: '%4' }
+body: |
+ bb.0.bb0:
+ successors: %bb.1.bb1(0x80000000)
+ liveins: %edi
+
+ %4 = COPY %edi
+ %5 = MOV32r0 implicit-def dead %eflags
+
+ bb.1.bb1:
+ successors: %bb.3.bb4(0x30000000), %bb.2.bb3(0x50000000)
+
+ ; CHECK: %0 = PHI %5, %bb.0.bb0, %3, %bb.5.bb7
+ %0 = PHI %5, %bb.0.bb0, %3, %bb.5.bb7
+ %6 = MOV32ri 1
+ TEST32rr %4, %4, implicit-def %eflags
+ JE_1 %bb.3.bb4, implicit %eflags
+ JMP_1 %bb.2.bb3
+
+ bb.2.bb3:
+ successors: %bb.3.bb4(0x80000000)
+
+ %7 = MOV32ri 2
+
+ bb.3.bb4:
+ successors: %bb.5.bb7(0x30000000), %bb.4.bb6(0x50000000)
+
+ %1 = PHI %6, %bb.1.bb1, %7, %bb.2.bb3
+ TEST32rr %1, %1, implicit-def %eflags
+ JE_1 %bb.5.bb7, implicit %eflags
+ JMP_1 %bb.4.bb6
+
+ bb.4.bb6:
+ successors: %bb.5.bb7(0x80000000)
+
+ %9 = MOV32ri 2
+
+ bb.5.bb7:
+ successors: %bb.1.bb1(0x7c000000), %bb.6.bb8(0x04000000)
+
+ %2 = PHI %6, %bb.3.bb4, %9, %bb.4.bb6
+ %10 = ADD32rr %1, %0, implicit-def dead %eflags
+ ; CHECK: %10 = ADD32rr
+ ; CHECK-SAME: %0,
+ ; CHECK-SAME: %1,
+ %3 = ADD32rr %2, killed %10, implicit-def dead %eflags
+ ; CHECK: %3 = ADD32rr
+ ; CHECK-SAME: %10,
+ ; CHECK-SAME: %2,
+ %11 = SUB32ri8 %3, 10, implicit-def %eflags
+ JL_1 %bb.1.bb1, implicit %eflags
+ JMP_1 %bb.6.bb8
+
+ bb.6.bb8:
+ %12 = MOV32r0 implicit-def dead %eflags
+ %eax = COPY %12
+ RET 0, %eax
+
+...
+---
+# Here a recurrence is formulated around %0, %11, and %3, but operands should
+# not be commuted because %0 has a use outside of recurrence. This is to
+# prevent the case of commuting operands ties the values with overlapping live
+# ranges.
+name: bar
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32, preferred-register: '' }
+ - { id: 1, class: gr32, preferred-register: '' }
+ - { id: 2, class: gr32, preferred-register: '' }
+ - { id: 3, class: gr32, preferred-register: '' }
+ - { id: 4, class: gr32, preferred-register: '' }
+ - { id: 5, class: gr64, preferred-register: '' }
+ - { id: 6, class: gr32, preferred-register: '' }
+ - { id: 7, class: gr32, preferred-register: '' }
+ - { id: 8, class: gr32, preferred-register: '' }
+ - { id: 9, class: gr32, preferred-register: '' }
+ - { id: 10, class: gr32, preferred-register: '' }
+ - { id: 11, class: gr32, preferred-register: '' }
+ - { id: 12, class: gr32, preferred-register: '' }
+ - { id: 13, class: gr32, preferred-register: '' }
+liveins:
+ - { reg: '%edi', virtual-reg: '%4' }
+ - { reg: '%rsi', virtual-reg: '%5' }
+body: |
+ bb.0.bb0:
+ successors: %bb.1.bb1(0x80000000)
+ liveins: %edi, %rsi
+
+ %5 = COPY %rsi
+ %4 = COPY %edi
+ %6 = MOV32r0 implicit-def dead %eflags
+
+ bb.1.bb1:
+ successors: %bb.3.bb4(0x30000000), %bb.2.bb3(0x50000000)
+
+ %0 = PHI %6, %bb.0.bb0, %3, %bb.5.bb7
+ ; CHECK: %0 = PHI %6, %bb.0.bb0, %3, %bb.5.bb7
+ %7 = MOV32ri 1
+ TEST32rr %4, %4, implicit-def %eflags
+ JE_1 %bb.3.bb4, implicit %eflags
+ JMP_1 %bb.2.bb3
+
+ bb.2.bb3:
+ successors: %bb.3.bb4(0x80000000)
+
+ %8 = MOV32ri 2
+
+ bb.3.bb4:
+ successors: %bb.5.bb7(0x30000000), %bb.4.bb6(0x50000000)
+
+ %1 = PHI %7, %bb.1.bb1, %8, %bb.2.bb3
+ TEST32rr %1, %1, implicit-def %eflags
+ JE_1 %bb.5.bb7, implicit %eflags
+ JMP_1 %bb.4.bb6
+
+ bb.4.bb6:
+ successors: %bb.5.bb7(0x80000000)
+
+ %10 = MOV32ri 2
+
+ bb.5.bb7:
+ successors: %bb.1.bb1(0x7c000000), %bb.6.bb8(0x04000000)
+
+ %2 = PHI %7, %bb.3.bb4, %10, %bb.4.bb6
+ %11 = ADD32rr %1, %0, implicit-def dead %eflags
+ ; CHECK: %11 = ADD32rr
+ ; CHECK-SAME: %1,
+ ; CHECK-SAME: %0,
+ MOV32mr %5, 1, _, 0, _, %0 :: (store 4 into %ir.p)
+ %3 = ADD32rr %2, killed %11, implicit-def dead %eflags
+ ; CHECK: %3 = ADD32rr
+ ; CHECK-SAME: %2,
+ ; CHECK-SAME: %11,
+ %12 = SUB32ri8 %3, 10, implicit-def %eflags
+ JL_1 %bb.1.bb1, implicit %eflags
+ JMP_1 %bb.6.bb8
+
+ bb.6.bb8:
+ %13 = MOV32r0 implicit-def dead %eflags
+ %eax = COPY %13
+ RET 0, %eax
+
+...
diff --git a/test/CodeGen/X86/sbb.ll b/test/CodeGen/X86/sbb.ll
index bc00fc7c66ad..414780b2d4e6 100644
--- a/test/CodeGen/X86/sbb.ll
+++ b/test/CodeGen/X86/sbb.ll
@@ -111,6 +111,86 @@ define i8 @i8_select_neg1_or_0_commuted_as_math(i8 %x) {
ret i8 %add
}
+; (X <u Y) ? -1 : 0 --> cmp, sbb
+
+define i32 @ult_select_neg1_or_0(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ult_select_neg1_or_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: sbbl %eax, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ult i32 %x, %y
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; Swap the predicate and compare operands:
+; (Y >u X) ? -1 : 0 --> cmp, sbb
+
+define i32 @ugt_select_neg1_or_0(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ugt_select_neg1_or_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: cmovbel %ecx, %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ugt i32 %y, %x
+ %ext = sext i1 %cmp to i32
+ ret i32 %ext
+}
+
+; Invert the predicate and effectively swap the select operands:
+; (X >=u Y) ? 0 : -1 --> (X <u Y) ? -1 : 0 --> cmp, sbb
+
+define i32 @uge_select_0_or_neg1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: uge_select_0_or_neg1:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: setae %al
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: retq
+ %cmp = icmp uge i32 %x, %y
+ %ext = zext i1 %cmp to i32
+ %add = add i32 %ext, -1
+ ret i32 %add
+}
+
+; Swap the predicate and compare operands:
+; (Y <=u X) ? 0 : -1 --> (X <u Y) ? -1 : 0 --> cmp, sbb
+
+define i32 @ule_select_0_or_neg1(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: ule_select_0_or_neg1:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl %edi, %esi
+; CHECK-NEXT: setbe %al
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: retq
+ %cmp = icmp ule i32 %y, %x
+ %ext = zext i1 %cmp to i32
+ %add = add i32 %ext, -1
+ ret i32 %add
+}
+
+; Verify that subtract with constant is the same thing.
+; (X >=u Y) ? 0 : -1 --> (X <u Y) ? -1 : 0 --> cmp, sbb
+
+define i32 @uge_select_0_or_neg1_sub(i32 %x, i32 %y) nounwind {
+; CHECK-LABEL: uge_select_0_or_neg1_sub:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: cmpl %esi, %edi
+; CHECK-NEXT: setae %al
+; CHECK-NEXT: decl %eax
+; CHECK-NEXT: retq
+ %cmp = icmp uge i32 %x, %y
+ %ext = zext i1 %cmp to i32
+ %sub = sub i32 %ext, 1
+ ret i32 %sub
+}
+
; Make sure we're creating nodes with the right value types. This would crash.
; https://bugs.llvm.org/show_bug.cgi?id=33560
diff --git a/test/CodeGen/X86/vector-shuffle-512-v16.ll b/test/CodeGen/X86/vector-shuffle-512-v16.ll
index 2e65bd8c75c7..174a487160c7 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -279,6 +279,35 @@ define <16 x i32> @shuffle_v16i32_0_1_2_19_u_u_u_u_u_u_u_u_u_u_u_u(<16 x i32> %a
ret <16 x i32> %c
}
+;FIXME: can do better with vpcompress
+define <8 x i32> @test_v16i32_1_3_5_7_9_11_13_15(<16 x i32> %v) {
+; ALL-LABEL: test_v16i32_1_3_5_7_9_11_13_15:
+; ALL: # BB#0:
+; ALL-NEXT: vextracti32x8 $1, %zmm0, %ymm1
+; ALL-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[1,3],ymm0[5,7],ymm1[5,7]
+; ALL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; ALL-NEXT: retq
+ %res = shufflevector <16 x i32> %v, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i32> %res
+}
+
+;FIXME: can do better with vpcompress
+define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
+; ALL-LABEL: test_v16i32_0_1_2_12:
+; ALL: # BB#0:
+; ALL-NEXT: vpextrd $1, %xmm0, %eax
+; ALL-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
+; ALL-NEXT: vpextrd $2, %xmm0, %eax
+; ALL-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; ALL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %eax
+; ALL-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 12>
+ ret <4 x i32> %res
+}
+
define <8 x float> @shuffle_v16f32_extract_256(float* %RET, float* %a) {
; ALL-LABEL: shuffle_v16f32_extract_256:
; ALL: # BB#0:
@@ -290,6 +319,34 @@ define <8 x float> @shuffle_v16f32_extract_256(float* %RET, float* %a) {
ret <8 x float> %v2
}
+;FIXME: can do better with vcompressp
+define <8 x float> @test_v16f32_0_1_2_3_4_6_7_10 (<16 x float> %v) {
+; ALL-LABEL: test_v16f32_0_1_2_3_4_6_7_10:
+; ALL: # BB#0:
+; ALL-NEXT: vextractf32x8 $1, %zmm0, %ymm1
+; ALL-NEXT: vmovsldup {{.*#+}} xmm1 = xmm1[0,0,2,2]
+; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,7,u]
+; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6],ymm1[7]
+; ALL-NEXT: retq
+ %res = shufflevector <16 x float> %v, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 6, i32 7, i32 10>
+ ret <8 x float> %res
+}
+
+;FIXME: can do better with vcompressp
+define <4 x float> @test_v16f32_0_1_3_6 (<16 x float> %v) {
+; ALL-LABEL: test_v16f32_0_1_3_6:
+; ALL: # BB#0:
+; ALL-NEXT: vextractf32x4 $1, %zmm0, %xmm1
+; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; ALL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,3,3]
+; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; ALL-NEXT: vzeroupper
+; ALL-NEXT: retq
+ %res = shufflevector <16 x float> %v, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 6>
+ ret <4 x float> %res
+}
+
define <16 x i32> @shuffle_v16i16_1_0_0_0_5_4_4_4_9_8_8_8_13_12_12_12(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i16_1_0_0_0_5_4_4_4_9_8_8_8_13_12_12_12:
; ALL: # BB#0:
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index 30c8d1b2373e..d0b7e4eb205c 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2659,3 +2659,91 @@ define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
%shuffle = shufflevector <2 x double> %a, <2 x double> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>
ret <8 x double> %shuffle
}
+
+;FIXME: compressp
+define <4 x double> @test_v8f64_2346 (<8 x double> %v) {
+; AVX512F-LABEL: test_v8f64_2346:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vextractf64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,2]
+; AVX512F-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8f64_2346:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX512F-32-NEXT: vextractf64x4 $1, %zmm0, %ymm0
+; AVX512F-32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,2]
+; AVX512F-32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x double> %v, <8 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 6>
+ ret <4 x double> %res
+}
+
+;FIXME: compressp
+define <2 x double> @test_v8f64_34 (<8 x double> %v) {
+; AVX512F-LABEL: test_v8f64_34:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; AVX512F-NEXT: vextractf32x4 $1, %zmm0, %xmm0
+; AVX512F-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8f64_34:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextractf32x4 $2, %zmm0, %xmm1
+; AVX512F-32-NEXT: vextractf32x4 $1, %zmm0, %xmm0
+; AVX512F-32-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0]
+; AVX512F-32-NEXT: vzeroupper
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x double> %v, <8 x double> undef, <2 x i32> <i32 3, i32 4>
+ ret <2 x double> %res
+}
+
+; FIXME: vpcompress
+define <4 x i64> @test_v8i64_1257 (<8 x i64> %v) {
+; AVX512F-LABEL: test_v8i64_1257:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
+; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,2,3]
+; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8i64_1257:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-32-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3]
+; AVX512F-32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,2,3]
+; AVX512F-32-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x i64> %v, <8 x i64> undef, <4 x i32> <i32 1, i32 2, i32 5, i32 7>
+ ret <4 x i64> %res
+}
+
+define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
+; AVX512F-LABEL: test_v8i64_2_5:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
+; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512F-32-LABEL: test_v8i64_2_5:
+; AVX512F-32: # BB#0:
+; AVX512F-32-NEXT: vextracti32x4 $1, %zmm0, %xmm1
+; AVX512F-32-NEXT: vpextrd $1, %xmm1, %eax
+; AVX512F-32-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; AVX512F-32-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-32-NEXT: vpextrd $2, %xmm0, %eax
+; AVX512F-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX512F-32-NEXT: vpextrd $3, %xmm0, %eax
+; AVX512F-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX512F-32-NEXT: vzeroupper
+; AVX512F-32-NEXT: retl
+ %res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
+ ret <2 x i64> %res
+}
diff --git a/test/CodeGen/X86/vector-truncate-combine.ll b/test/CodeGen/X86/vector-truncate-combine.ll
new file mode 100644
index 000000000000..1a6dac8fa6e4
--- /dev/null
+++ b/test/CodeGen/X86/vector-truncate-combine.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=x86_64-- -O2 -start-after=stack-protector -stop-before=loops %s -o - | FileCheck %s
+
+; This test verifies the fix for PR33368.
+;
+; The expected outcome of the operation is to store bytes 0 and 2 of the incoming
+; parameter into c2 (a 2 x i8 vector). DAGCombine converts shuffles into a
+; sequence of extend and subsequent truncate operations. The bug was that an extension
+; by 4 followed by a truncation by 8 was completely eliminated.
+
+; The test checks for the correct sequence of operations that results from the
+; preservation of the extend/truncate operations mentioned above (2 extend and
+; 3 truncate instructions).
+;
+; NOTE: This operation could be collapsed in to a single truncate. Once that is done
+; this test will have to be adjusted.
+
+; CHECK: PUNPCKLBWrr
+; CHECK: PUNPCKLWDrr
+; CHECK: PACKUSWBrr
+; CHECK: PACKUSWBrr
+; CHECK: PACKUSWBrr
+
+define void @test(double %vec.coerce) local_unnamed_addr {
+entry:
+ %c2 = alloca <2 x i8>, align 2
+ %0 = bitcast double %vec.coerce to <8 x i8>
+ %1 = shufflevector <8 x i8> %0, <8 x i8> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 0>
+ %2 = shufflevector <4 x i8> %1, <4 x i8> undef, <2 x i32> <i32 3, i32 0>
+ store volatile <2 x i8> %2, <2 x i8>* %c2, align 2
+ br label %if.end
+
+if.end:
+ %3 = bitcast <2 x i8> %2 to i16
+ ret void
+}
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 6047279bc6ed..1263605a6dc0 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -194,6 +194,64 @@ define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <
define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32 x i8> %x3, <32 x i8> %x4, <128 x i8>* %p) {
+; AVX1-LABEL: interleaved_store_vf32_i8_stride4:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm5
+; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0]
+; AVX1-NEXT: vandnps %ymm5, %ymm4, %ymm5
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX1-NEXT: vandps %ymm4, %ymm6, %ymm6
+; AVX1-NEXT: vorps %ymm5, %ymm6, %ymm8
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
+; AVX1-NEXT: vandnps %ymm6, %ymm4, %ymm6
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
+; AVX1-NEXT: vandps %ymm4, %ymm5, %ymm5
+; AVX1-NEXT: vorps %ymm6, %ymm5, %ymm9
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm5, %ymm5
+; AVX1-NEXT: vandnps %ymm5, %ymm4, %ymm5
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX1-NEXT: vandps %ymm4, %ymm6, %ymm6
+; AVX1-NEXT: vorps %ymm5, %ymm6, %ymm5
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vandnps %ymm2, %ymm4, %ymm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps %ymm0, 96(%rdi)
+; AVX1-NEXT: vmovaps %ymm5, 64(%rdi)
+; AVX1-NEXT: vmovaps %ymm9, 32(%rdi)
+; AVX1-NEXT: vmovaps %ymm8, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
; AVX2-LABEL: interleaved_store_vf32_i8_stride4:
; AVX2: # BB#0:
; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]