author     Dimitry Andric <dim@FreeBSD.org>    2017-01-09 21:23:09 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2017-01-09 21:23:09 +0000
commit     909545a822eef491158f831688066f0ec2866938 (patch)
tree       5b0bf0e81294007a9b462b21031b3df272c655c3 /test
parent     7e7b6700743285c0af506ac6299ddf82ebd434b9 (diff)
Diffstat (limited to 'test')
-rw-r--r--  test/Analysis/CostModel/X86/shuffle-reverse.ll | 2
-rw-r--r--  test/Analysis/CostModel/X86/testshiftlshr.ll | 4
-rw-r--r--  test/Analysis/CostModel/X86/testshiftshl.ll | 4
-rw-r--r--  test/Analysis/CostModel/X86/vshift-ashr-cost.ll | 45
-rw-r--r--  test/Analysis/CostModel/X86/vshift-lshr-cost.ll | 66
-rw-r--r--  test/Analysis/CostModel/X86/vshift-shl-cost.ll | 70
-rw-r--r--  test/Analysis/ScalarEvolution/invalidation.ll | 70
-rw-r--r--  test/Analysis/ValueTracking/assume.ll | 22
-rw-r--r--  test/Bindings/Go/lit.local.cfg | 2
-rw-r--r--  test/Bindings/OCaml/lit.local.cfg | 2
-rw-r--r--  test/CMakeLists.txt | 14
-rw-r--r--  test/CodeGen/AMDGPU/load-constant-i16.ll | 138
-rw-r--r--  test/CodeGen/AMDGPU/load-global-i16.ll | 331
-rw-r--r--  test/CodeGen/AMDGPU/min.ll | 172
-rw-r--r--  test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll | 16
-rw-r--r--  test/CodeGen/AMDGPU/store-private.ll | 743
-rw-r--r--  test/CodeGen/AVR/intrinsics/read_register.ll | 17
-rw-r--r--  test/CodeGen/WebAssembly/function-bitcasts.ll | 56
-rw-r--r--  test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll | 26
-rw-r--r--  test/CodeGen/X86/avx2-arith.ll | 101
-rw-r--r--  test/CodeGen/X86/avx512-bugfix-23634.ll | 2
-rw-r--r--  test/CodeGen/X86/avx512-calling-conv.ll | 24
-rw-r--r--  test/CodeGen/X86/avx512-cvt.ll | 14
-rw-r--r--  test/CodeGen/X86/avx512-ext.ll | 33
-rw-r--r--  test/CodeGen/X86/avx512-insert-extract.ll | 56
-rw-r--r--  test/CodeGen/X86/avx512-mask-op.ll | 110
-rw-r--r--  test/CodeGen/X86/avx512-mov.ll | 16
-rw-r--r--  test/CodeGen/X86/avx512-regcall-NoMask.ll | 30
-rw-r--r--  test/CodeGen/X86/avx512-vbroadcast.ll | 3
-rw-r--r--  test/CodeGen/X86/avx512-vec-cmp.ll | 141
-rw-r--r--  test/CodeGen/X86/avx512bw-mov.ll | 4
-rw-r--r--  test/CodeGen/X86/avx512bw-vec-cmp.ll | 36
-rw-r--r--  test/CodeGen/X86/avx512bwvl-mov.ll | 8
-rw-r--r--  test/CodeGen/X86/avx512bwvl-vec-cmp.ll | 72
-rw-r--r--  test/CodeGen/X86/avx512vl-mov.ll | 32
-rw-r--r--  test/CodeGen/X86/avx512vl-vec-cmp.ll | 144
-rw-r--r--  test/CodeGen/X86/cmov.ll | 6
-rw-r--r--  test/CodeGen/X86/fma-fneg-combine.ll | 12
-rw-r--r--  test/CodeGen/X86/fmaddsub-combine.ll | 129
-rw-r--r--  test/CodeGen/X86/sse-fsignum.ll | 11
-rw-r--r--  test/CodeGen/X86/vector-compare-results.ll | 6208
-rw-r--r--  test/CodeGen/X86/vector-sext.ll | 45
-rw-r--r--  test/CodeGen/X86/vector-shift-ashr-128.ll | 130
-rw-r--r--  test/CodeGen/X86/vector-shift-ashr-256.ll | 234
-rw-r--r--  test/CodeGen/X86/vector-shift-ashr-512.ll | 52
-rw-r--r--  test/CodeGen/X86/vector-shift-lshr-128.ll | 94
-rw-r--r--  test/CodeGen/X86/vector-shift-lshr-256.ll | 162
-rw-r--r--  test/CodeGen/X86/vector-shift-lshr-512.ll | 52
-rw-r--r--  test/CodeGen/X86/vector-shift-shl-128.ll | 88
-rw-r--r--  test/CodeGen/X86/vector-shift-shl-256.ll | 154
-rw-r--r--  test/CodeGen/X86/vector-shift-shl-512.ll | 27
-rw-r--r--  test/CodeGen/X86/vector-shuffle-512-v64.ll | 9
-rw-r--r--  test/CodeGen/X86/vector-shuffle-masked.ll | 33
-rw-r--r--  test/CodeGen/X86/vector-shuffle-v1.ll | 74
-rw-r--r--  test/ExecutionEngine/Interpreter/lit.local.cfg | 2
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s | 11
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_local_branch.s | 14
-rw-r--r--  test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s | 35
-rw-r--r--  test/Instrumentation/AddressSanitizer/global_metadata_darwin.ll | 2
-rw-r--r--  test/JitListener/lit.local.cfg | 2
-rw-r--r--  test/ThinLTO/X86/Inputs/funcimport-tbaa.ll | 11
-rw-r--r--  test/ThinLTO/X86/Inputs/local_name_conflict1.ll | 17
-rw-r--r--  test/ThinLTO/X86/Inputs/local_name_conflict2.ll | 17
-rw-r--r--  test/ThinLTO/X86/funcimport-tbaa.ll | 38
-rw-r--r--  test/ThinLTO/X86/local_name_conflict.ll | 29
-rw-r--r--  test/Transforms/GVN/invariant.group.ll | 52
-rw-r--r--  test/Transforms/InstCombine/assume.ll | 45
-rw-r--r--  test/Transforms/InstCombine/assume2.ll | 141
-rw-r--r--  test/Transforms/InstCombine/fabs.ll | 42
-rw-r--r--  test/Transforms/InstCombine/fast-math.ll | 6
-rw-r--r--  test/Transforms/InstCombine/urem-simplify-bug.ll | 52
-rw-r--r--  test/Transforms/InstSimplify/div.ll | 15
-rw-r--r--  test/Transforms/InstSimplify/rem.ll | 14
-rw-r--r--  test/Transforms/LICM/hoisting.ll | 27
-rw-r--r--  test/Transforms/LoopLoadElim/forward.ll | 6
-rw-r--r--  test/Transforms/LoopVectorize/iv_outside_user.ll | 45
-rw-r--r--  test/Transforms/NewGVN/basic-cyclic-opt.ll | 235
-rw-r--r--  test/Transforms/NewGVN/cyclic-phi-handling.ll | 37
-rw-r--r--  test/Transforms/NewGVN/invariant.group.ll | 52
-rw-r--r--  test/Transforms/NewGVN/memory-handling.ll | 195
-rw-r--r--  test/Transforms/NewGVN/pr31501.ll | 136
-rw-r--r--  test/Transforms/NewGVN/pr31573.ll | 42
-rw-r--r--  test/lit.cfg | 10
-rw-r--r--  test/lit.site.cfg.in | 18
-rw-r--r--  test/tools/llvm-config/system-libs.test | 3
-rw-r--r--  test/tools/llvm-config/system-libs.windows.test | 3
-rw-r--r--  test/tools/llvm-opt-report/Inputs/dm.c | 13
-rw-r--r--  test/tools/llvm-opt-report/Inputs/dm.yaml | 104
-rw-r--r--  test/tools/llvm-opt-report/func-dm.test | 17
89 files changed, 8652 insertions, 2982 deletions
diff --git a/test/Analysis/CostModel/X86/shuffle-reverse.ll b/test/Analysis/CostModel/X86/shuffle-reverse.ll
index a1bdda0690aaf..627d798574340 100644
--- a/test/Analysis/CostModel/X86/shuffle-reverse.ll
+++ b/test/Analysis/CostModel/X86/shuffle-reverse.ll
@@ -161,7 +161,7 @@ define void @test_vXi8(<16 x i8> %src128, <32 x i8> %src256, <64 x i8> %src512)
; AVX1: cost of 8 {{.*}} %V512 = shufflevector
; AVX2: cost of 4 {{.*}} %V512 = shufflevector
; AVX512F: cost of 4 {{.*}} %V512 = shufflevector
- ; AVX512BW: cost of 6 {{.*}} %V512 = shufflevector
+ ; AVX512BW: cost of 2 {{.*}} %V512 = shufflevector
%V512 = shufflevector <64 x i8> %src512, <64 x i8> undef, <64 x i32> <i32 63, i32 62, i32 61, i32 60, i32 59, i32 58, i32 57, i32 56, i32 55, i32 54, i32 53, i32 52, i32 51, i32 50, i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret void
diff --git a/test/Analysis/CostModel/X86/testshiftlshr.ll b/test/Analysis/CostModel/X86/testshiftlshr.ll
index 52f176fe4d63d..e5fff9b5e4da9 100644
--- a/test/Analysis/CostModel/X86/testshiftlshr.ll
+++ b/test/Analysis/CostModel/X86/testshiftlshr.ll
@@ -498,7 +498,7 @@ entry:
define %shifttypec16i8 @shift16i8c(%shifttypec16i8 %a, %shifttypec16i8 %b) {
entry:
; SSE2: shift16i8c
- ; SSE2: cost of 1 {{.*}} lshr
+ ; SSE2: cost of 2 {{.*}} lshr
; SSE2-CODEGEN: shift16i8c
; SSE2-CODEGEN: psrlw $3
@@ -513,7 +513,7 @@ entry:
define %shifttypec32i8 @shift32i8c(%shifttypec32i8 %a, %shifttypec32i8 %b) {
entry:
; SSE2: shift32i8c
- ; SSE2: cost of 2 {{.*}} lshr
+ ; SSE2: cost of 4 {{.*}} lshr
; SSE2-CODEGEN: shift32i8c
; SSE2-CODEGEN: psrlw $3
diff --git a/test/Analysis/CostModel/X86/testshiftshl.ll b/test/Analysis/CostModel/X86/testshiftshl.ll
index e385c5bfeeacf..6628b9b87986b 100644
--- a/test/Analysis/CostModel/X86/testshiftshl.ll
+++ b/test/Analysis/CostModel/X86/testshiftshl.ll
@@ -498,7 +498,7 @@ entry:
define %shifttypec16i8 @shift16i8c(%shifttypec16i8 %a, %shifttypec16i8 %b) {
entry:
; SSE2: shift16i8c
- ; SSE2: cost of 1 {{.*}} shl
+ ; SSE2: cost of 2 {{.*}} shl
; SSE2-CODEGEN: shift16i8c
; SSE2-CODEGEN: psllw $3
@@ -513,7 +513,7 @@ entry:
define %shifttypec32i8 @shift32i8c(%shifttypec32i8 %a, %shifttypec32i8 %b) {
entry:
; SSE2: shift32i8c
- ; SSE2: cost of 2 {{.*}} shl
+ ; SSE2: cost of 4 {{.*}} shl
; SSE2-CODEGEN: shift32i8c
; SSE2-CODEGEN: psllw $3
diff --git a/test/Analysis/CostModel/X86/vshift-ashr-cost.ll b/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
index 888164df75f5d..6756f3ba2802e 100644
--- a/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-ashr-cost.ll
@@ -120,7 +120,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <32 x i16> %a, %b
ret <32 x i16> %shift
@@ -282,7 +282,7 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i16> %a, %splat
@@ -439,7 +439,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <32 x i16> %shift
@@ -529,8 +529,7 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) {
; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
%shift = ashr <4 x i32> %a, <i32 5, i32 5, i32 5, i32 5>
ret <4 x i32> %shift
}
@@ -568,7 +567,7 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) {
; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
%shift = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <8 x i16> %shift
}
@@ -578,9 +577,10 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) {
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
; AVX: Found an estimated cost of 2 for instruction: %shift
-; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -590,10 +590,11 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
; AVX: Found an estimated cost of 4 for instruction: %shift
-; AVX2: Found an estimated cost of 20 for instruction: %shift
-; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512F: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = ashr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
}
@@ -605,7 +606,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) {
; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 4 for instruction: %shift
; AVX512: Found an estimated cost of 4 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <16 x i8> %shift
}
@@ -615,9 +616,10 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) {
; SSE2: Found an estimated cost of 8 for instruction: %shift
; SSE41: Found an estimated cost of 8 for instruction: %shift
; AVX: Found an estimated cost of 8 for instruction: %shift
-; AVX2: Found an estimated cost of 24 for instruction: %shift
-; AVX512: Found an estimated cost of 24 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 4 for instruction: %shift
+; AVX512: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
@@ -627,10 +629,11 @@ define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; SSE2: Found an estimated cost of 16 for instruction: %shift
; SSE41: Found an estimated cost of 16 for instruction: %shift
; AVX: Found an estimated cost of 16 for instruction: %shift
-; AVX2: Found an estimated cost of 48 for instruction: %shift
-; AVX512F: Found an estimated cost of 48 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; AVX2: Found an estimated cost of 8 for instruction: %shift
+; AVX512F: Found an estimated cost of 8 for instruction: %shift
+; AVX512BW: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 16 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 8 for instruction: %shift
%shift = ashr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
}
diff --git a/test/Analysis/CostModel/X86/vshift-lshr-cost.ll b/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
index b3382253739f7..63e6db194d520 100644
--- a/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-lshr-cost.ll
@@ -123,7 +123,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = lshr <32 x i16> %a, %b
ret <32 x i16> %shift
@@ -287,7 +287,7 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i16> %a, %splat
@@ -447,7 +447,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 8 for instruction: %shift
%shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <32 x i16> %shift
@@ -501,8 +501,7 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) {
; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <2 x i64> %a, <i64 7, i64 7>
ret <2 x i64> %shift
}
@@ -540,8 +539,7 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) {
; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOPAVX: Found an estimated cost of 2 for instruction: %shift
-; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <4 x i32> %a, <i32 5, i32 5, i32 5, i32 5>
ret <4 x i32> %shift
}
@@ -579,7 +577,7 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) {
; AVX: Found an estimated cost of 1 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <8 x i16> %shift
}
@@ -589,9 +587,10 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) {
; SSE2: Found an estimated cost of 2 for instruction: %shift
; SSE41: Found an estimated cost of 2 for instruction: %shift
; AVX: Found an estimated cost of 2 for instruction: %shift
-; AVX2: Found an estimated cost of 10 for instruction: %shift
-; AVX512: Found an estimated cost of 10 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 1 for instruction: %shift
+; AVX512: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 1 for instruction: %shift
%shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -601,21 +600,22 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; SSE2: Found an estimated cost of 4 for instruction: %shift
; SSE41: Found an estimated cost of 4 for instruction: %shift
; AVX: Found an estimated cost of 4 for instruction: %shift
-; AVX2: Found an estimated cost of 20 for instruction: %shift
-; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512F: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
}
define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i8':
-; SSE2: Found an estimated cost of 1 for instruction: %shift
-; SSE41: Found an estimated cost of 1 for instruction: %shift
-; AVX: Found an estimated cost of 1 for instruction: %shift
-; AVX2: Found an estimated cost of 1 for instruction: %shift
-; AVX512: Found an estimated cost of 1 for instruction: %shift
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512: Found an estimated cost of 2 for instruction: %shift
; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <16 x i8> %shift
@@ -623,25 +623,27 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) {
define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i8':
-; SSE2: Found an estimated cost of 2 for instruction: %shift
-; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
-; AVX2: Found an estimated cost of 11 for instruction: %shift
-; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v64i8':
-; SSE2: Found an estimated cost of 4 for instruction: %shift
-; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
-; AVX2: Found an estimated cost of 22 for instruction: %shift
-; AVX512F: Found an estimated cost of 22 for instruction: %shift
+; SSE2: Found an estimated cost of 8 for instruction: %shift
+; SSE41: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX2: Found an estimated cost of 4 for instruction: %shift
+; AVX512F: Found an estimated cost of 4 for instruction: %shift
; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; XOP: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = lshr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
}
diff --git a/test/Analysis/CostModel/X86/vshift-shl-cost.ll b/test/Analysis/CostModel/X86/vshift-shl-cost.ll
index 804c5a76c3197..8c42bd66c707a 100644
--- a/test/Analysis/CostModel/X86/vshift-shl-cost.ll
+++ b/test/Analysis/CostModel/X86/vshift-shl-cost.ll
@@ -57,8 +57,8 @@ define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v4i32':
; SSE2: Found an estimated cost of 10 for instruction: %shift
-; SSE41: Found an estimated cost of 10 for instruction: %shift
-; AVX: Found an estimated cost of 10 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 1 for instruction: %shift
@@ -70,8 +70,8 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v8i32':
; SSE2: Found an estimated cost of 20 for instruction: %shift
-; SSE41: Found an estimated cost of 20 for instruction: %shift
-; AVX: Found an estimated cost of 20 for instruction: %shift
+; SSE41: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 2 for instruction: %shift
@@ -83,8 +83,8 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'var_shift_v16i32':
; SSE2: Found an estimated cost of 40 for instruction: %shift
-; SSE41: Found an estimated cost of 40 for instruction: %shift
-; AVX: Found an estimated cost of 40 for instruction: %shift
+; SSE41: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 16 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
@@ -124,7 +124,7 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%shift = shl <32 x i16> %a, %b
ret <32 x i16> %shift
@@ -216,8 +216,8 @@ define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) {
define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v4i32':
; SSE2: Found an estimated cost of 10 for instruction: %shift
-; SSE41: Found an estimated cost of 10 for instruction: %shift
-; AVX: Found an estimated cost of 10 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 1 for instruction: %shift
@@ -230,8 +230,8 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) {
define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v8i32':
; SSE2: Found an estimated cost of 20 for instruction: %shift
-; SSE41: Found an estimated cost of 20 for instruction: %shift
-; AVX: Found an estimated cost of 20 for instruction: %shift
+; SSE41: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 1 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 2 for instruction: %shift
@@ -244,8 +244,8 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) {
define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) {
; CHECK: 'Cost Model Analysis' for function 'splatvar_shift_v16i32':
; SSE2: Found an estimated cost of 40 for instruction: %shift
-; SSE41: Found an estimated cost of 40 for instruction: %shift
-; AVX: Found an estimated cost of 40 for instruction: %shift
+; SSE41: Found an estimated cost of 16 for instruction: %shift
+; AVX: Found an estimated cost of 16 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
@@ -288,7 +288,7 @@ define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) {
; AVX: Found an estimated cost of 56 for instruction: %shift
; AVX2: Found an estimated cost of 20 for instruction: %shift
; AVX512F: Found an estimated cost of 20 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOP: Found an estimated cost of 4 for instruction: %shift
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i16> %a, %splat
@@ -449,7 +449,7 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) {
; AVX: Found an estimated cost of 8 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
@@ -607,7 +607,7 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
; AVX: Found an estimated cost of 4 for instruction: %shift
; AVX2: Found an estimated cost of 2 for instruction: %shift
; AVX512F: Found an estimated cost of 2 for instruction: %shift
-; AVX512BW: Found an estimated cost of 2 for instruction: %shift
+; AVX512BW: Found an estimated cost of 1 for instruction: %shift
; XOPAVX: Found an estimated cost of 4 for instruction: %shift
; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
@@ -616,37 +616,39 @@ define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) {
define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v16i8':
-; SSE2: Found an estimated cost of 1 for instruction: %shift
-; SSE41: Found an estimated cost of 1 for instruction: %shift
-; AVX: Found an estimated cost of 1 for instruction: %shift
-; AVX2: Found an estimated cost of 1 for instruction: %shift
-; AVX512: Found an estimated cost of 1 for instruction: %shift
-; XOP: Found an estimated cost of 1 for instruction: %shift
+; SSE2: Found an estimated cost of 2 for instruction: %shift
+; SSE41: Found an estimated cost of 2 for instruction: %shift
+; AVX: Found an estimated cost of 2 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512: Found an estimated cost of 2 for instruction: %shift
+; XOP: Found an estimated cost of 2 for instruction: %shift
%shift = shl <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <16 x i8> %shift
}
define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v32i8':
-; SSE2: Found an estimated cost of 2 for instruction: %shift
-; SSE41: Found an estimated cost of 2 for instruction: %shift
-; AVX: Found an estimated cost of 2 for instruction: %shift
-; AVX2: Found an estimated cost of 11 for instruction: %shift
-; AVX512: Found an estimated cost of 11 for instruction: %shift
-; XOP: Found an estimated cost of 2 for instruction: %shift
+; SSE2: Found an estimated cost of 4 for instruction: %shift
+; SSE41: Found an estimated cost of 4 for instruction: %shift
+; AVX: Found an estimated cost of 4 for instruction: %shift
+; AVX2: Found an estimated cost of 2 for instruction: %shift
+; AVX512: Found an estimated cost of 2 for instruction: %shift
+; XOPAVX: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 2 for instruction: %shift
%shift = shl <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) {
; CHECK: 'Cost Model Analysis' for function 'splatconstant_shift_v64i8':
-; SSE2: Found an estimated cost of 4 for instruction: %shift
-; SSE41: Found an estimated cost of 4 for instruction: %shift
-; AVX: Found an estimated cost of 4 for instruction: %shift
-; AVX2: Found an estimated cost of 22 for instruction: %shift
-; AVX512F: Found an estimated cost of 22 for instruction: %shift
+; SSE2: Found an estimated cost of 8 for instruction: %shift
+; SSE41: Found an estimated cost of 8 for instruction: %shift
+; AVX: Found an estimated cost of 8 for instruction: %shift
+; AVX2: Found an estimated cost of 4 for instruction: %shift
+; AVX512F: Found an estimated cost of 4 for instruction: %shift
; AVX512BW: Found an estimated cost of 2 for instruction: %shift
-; XOP: Found an estimated cost of 4 for instruction: %shift
+; XOPAVX: Found an estimated cost of 8 for instruction: %shift
+; XOPAVX2: Found an estimated cost of 4 for instruction: %shift
%shift = shl <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
}
diff --git a/test/Analysis/ScalarEvolution/invalidation.ll b/test/Analysis/ScalarEvolution/invalidation.ll
new file mode 100644
index 0000000000000..1fcaddb525e64
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/invalidation.ll
@@ -0,0 +1,70 @@
+; Test that SCEV gets invalidated when one of its dependencies is invalidated.
+;
+; Each of the RUNs checks that the pass manager runs SCEV, then invalidates it
+; due to a dependency being invalidated, and then re-runs it. This will
+; directly fail and indicates a failure that would occur later if we didn't
+; invalidate SCEV in this way.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; RUN: opt < %s -passes='require<scalar-evolution>,invalidate<assumptions>,print<scalar-evolution>' \
+; RUN: -debug-pass-manager -disable-output 2>&1 \
+; RUN: | FileCheck %s -check-prefixes=CHECK,CHECK-AC-INVALIDATE
+;
+; CHECK-AC-INVALIDATE: Running pass: RequireAnalysisPass
+; CHECK-AC-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
+; CHECK-AC-INVALIDATE: Running analysis: AssumptionAnalysis
+; CHECK-AC-INVALIDATE: Running pass: InvalidateAnalysisPass
+; CHECK-AC-INVALIDATE: Invalidating analysis: AssumptionAnalysis
+; CHECK-AC-INVALIDATE: Running pass: ScalarEvolutionPrinterPass
+; CHECK-AC-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
+; CHECK-AC-INVALIDATE: Running analysis: AssumptionAnalysis
+
+; RUN: opt < %s -passes='require<scalar-evolution>,invalidate<domtree>,print<scalar-evolution>' \
+; RUN: -debug-pass-manager -disable-output 2>&1 \
+; RUN: | FileCheck %s -check-prefixes=CHECK,CHECK-DT-INVALIDATE
+;
+; CHECK-DT-INVALIDATE: Running pass: RequireAnalysisPass
+; CHECK-DT-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
+; CHECK-DT-INVALIDATE: Running analysis: DominatorTreeAnalysis
+; CHECK-DT-INVALIDATE: Running pass: InvalidateAnalysisPass
+; CHECK-DT-INVALIDATE: Invalidating analysis: DominatorTreeAnalysis
+; CHECK-DT-INVALIDATE: Running pass: ScalarEvolutionPrinterPass
+; CHECK-DT-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
+; CHECK-DT-INVALIDATE: Running analysis: DominatorTreeAnalysis
+
+; RUN: opt < %s -passes='require<scalar-evolution>,invalidate<loops>,print<scalar-evolution>' \
+; RUN: -debug-pass-manager -disable-output 2>&1 \
+; RUN: | FileCheck %s -check-prefixes=CHECK,CHECK-LI-INVALIDATE
+;
+; CHECK-LI-INVALIDATE: Running pass: RequireAnalysisPass
+; CHECK-LI-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LI-INVALIDATE: Running analysis: LoopAnalysis
+; CHECK-LI-INVALIDATE: Running pass: InvalidateAnalysisPass
+; CHECK-LI-INVALIDATE: Invalidating analysis: LoopAnalysis
+; CHECK-LI-INVALIDATE: Running pass: ScalarEvolutionPrinterPass
+; CHECK-LI-INVALIDATE: Running analysis: ScalarEvolutionAnalysis
+; CHECK-LI-INVALIDATE: Running analysis: LoopAnalysis
+
+; This test isn't particularly interesting, it's just enough to make sure we
+; actually do some work inside of SCEV so that if we regress here despite the
+; debug pass printing continuing to match, ASan and other tools can catch it.
+define void @test(i32 %n) {
+; CHECK-LABEL: Classifying expressions for: @test
+; CHECK: Loop %loop: backedge-taken count is 14
+; CHECK: Loop %loop: max backedge-taken count is 14
+; CHECK: Loop %loop: Predicated backedge-taken count is 14
+
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.inc, %loop ]
+ %iv.inc = add nsw i32 %iv, 3
+ %becond = icmp ne i32 %iv.inc, 46
+ br i1 %becond, label %loop, label %leave
+
+leave:
+ ret void
+}
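(Aside, not part of the patch: a back-of-the-envelope check of the trip count expected by the CHECK lines above. The interpretation of the arithmetic is an assumption on my part, not something stated in the test.)

# The induction variable steps 0, 3, 6, ... and %iv.inc is compared (ne)
# against 46 under nsw; the expected backedge-taken count of 14 matches
# floor((46 - 3) / 3).
start, step, limit = 3, 3, 46
assert (limit - start) // step == 14   # matches "backedge-taken count is 14"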
diff --git a/test/Analysis/ValueTracking/assume.ll b/test/Analysis/ValueTracking/assume.ll
index 4bffe8ef79096..fe0ee53eb4162 100644
--- a/test/Analysis/ValueTracking/assume.ll
+++ b/test/Analysis/ValueTracking/assume.ll
@@ -1,14 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
define i32 @assume_add(i32 %a, i32 %b) {
; CHECK-LABEL: @assume_add(
- %1 = add i32 %a, %b
- %last_two_digits = and i32 %1, 3
- %2 = icmp eq i32 %last_two_digits, 0
- call void @llvm.assume(i1 %2)
- %3 = add i32 %1, 3
-; CHECK: %3 = or i32 %1, 3
- ret i32 %3
+; CHECK-NEXT: [[T1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[LAST_TWO_DIGITS:%.*]] = and i32 [[T1]], 3
+; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[LAST_TWO_DIGITS]], 0
+; CHECK-NEXT: call void @llvm.assume(i1 [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = or i32 [[T1]], 3
+; CHECK-NEXT: ret i32 [[T3]]
+;
+ %t1 = add i32 %a, %b
+ %last_two_digits = and i32 %t1, 3
+ %t2 = icmp eq i32 %last_two_digits, 0
+ call void @llvm.assume(i1 %t2)
+ %t3 = add i32 %t1, 3
+ ret i32 %t3
}
declare void @llvm.assume(i1)
+
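The rewrite the regenerated checks expect (the `add` becoming an `or`) rests on a small bit-level fact: the llvm.assume guarantees the two low bits of %t1 are zero, so adding 3 cannot carry. A minimal illustrative sketch in Python, not part of the patch:

# When x & 3 == 0, the low two bits of x are clear and 3 only sets those bits,
# so there is no carry and x + 3 == x | 3 -- the identity behind the
# "or i32 [[T1]], 3" in the CHECK lines above.
for x in range(0, 1 << 10, 4):   # every small x with its two low bits zero
    assert x + 3 == x | 3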
diff --git a/test/Bindings/Go/lit.local.cfg b/test/Bindings/Go/lit.local.cfg
index d68d867fb308f..a587f88f54aae 100644
--- a/test/Bindings/Go/lit.local.cfg
+++ b/test/Bindings/Go/lit.local.cfg
@@ -6,7 +6,7 @@ import sys
if not 'go' in config.root.llvm_bindings:
config.unsupported = True
-if config.root.include_go_tests != 'ON':
+if not config.root.include_go_tests:
config.unsupported = True
def find_executable(executable, path=None):
diff --git a/test/Bindings/OCaml/lit.local.cfg b/test/Bindings/OCaml/lit.local.cfg
index 7a83ca142808e..fd9e1c50e990c 100644
--- a/test/Bindings/OCaml/lit.local.cfg
+++ b/test/Bindings/OCaml/lit.local.cfg
@@ -3,5 +3,5 @@ config.suffixes = ['.ml']
if not 'ocaml' in config.root.llvm_bindings:
config.unsupported = True
-if config.root.have_ocaml_ounit not in ('1', 'TRUE'):
+if not config.root.have_ocaml_ounit:
config.unsupported = True
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 635197bc9ddd3..c1667049f80fc 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,6 +1,14 @@
-if(LLVM_BUILD_EXAMPLES)
- set(ENABLE_EXAMPLES 1)
-endif()
+llvm_canonicalize_cmake_booleans(
+ LLVM_TOOL_LTO_BUILD
+ HAVE_OCAMLOPT
+ HAVE_OCAML_OUNIT
+ LLVM_INCLUDE_GO_TESTS
+ LLVM_USE_INTEL_JITEVENTS
+ HAVE_LIBZ
+ HAVE_LIBXAR
+ LLVM_ENABLE_DIA_SDK
+ LLVM_ENABLE_FFI
+ BUILD_SHARED_LIBS)
configure_lit_site_cfg(
${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
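The lit.local.cfg hunks above drop string comparisons such as `!= 'ON'` and `not in ('1', 'TRUE')` in favour of plain truthiness, which only works once the CMake options are canonicalized before being substituted into lit.site.cfg. A rough sketch of the intent; the exact substituted values are an assumption, not taken from this patch:

# Previously the substituted value could be raw CMake text like "ON", "TRUE"
# or "OFF", forcing string compares in the lit configs.  After
# llvm_canonicalize_cmake_booleans the value is a simple 0/1, so plain
# Python truthiness is enough.
include_go_tests = 0                 # hypothetical value substituted from LLVM_INCLUDE_GO_TESTS
if not include_go_tests:             # replaces the old `!= 'ON'` string compare
    unsupported = True               # mirrors config.unsupported = True in the hunks above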
diff --git a/test/CodeGen/AMDGPU/load-constant-i16.ll b/test/CodeGen/AMDGPU/load-constant-i16.ll
index 628d285141bc0..eb79767e62bef 100644
--- a/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -137,8 +137,8 @@ define void @constant_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x
; v2i16 is naturally 4 byte aligned
; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
-; TODO: This should use DST, but for some there are redundant MOVs
-; EG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal
+; EG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], literal
+; EG: 16
; EG: 16
define void @constant_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(2)* %in
@@ -153,11 +153,11 @@ define void @constant_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
; GCN-DAG: s_sext_i32_i16
; v2i16 is naturally 4 byte aligned
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XY, {{T[0-9].[XYZW]}},
; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; TODO: We should also use ASHR instead of LSHR + BFE
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST]].X, [[DST]], 0.0, literal
+; TODO: We should use ASHR instead of LSHR + BFE
+; EG-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{PV\.[XYZW]}}, 0.0, literal
; EG-DAG: 16
; EG-DAG: 16
define void @constant_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(2)* %in) #0 {
@@ -167,16 +167,23 @@ define void @constant_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x
ret void
}
-; FUNC-LABEL: {{^}}constant_constant_zextload_v3i16_to_v3i32:
+; FUNC-LABEL: {{^}}constant_zextload_v3i16_to_v3i32:
; GCN: s_load_dwordx2
; v3i16 is naturally 8 byte aligned
-; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1
-; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9].[XYZW]}},
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9].[XYZW]}},
+; EG: CF_END
+; EG-DAG: VTX_READ_32 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 0, #1
+; EG-DAG: VTX_READ_16 [[DST_HI:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 4, #1
; TODO: This should use DST, but for some there are redundant MOVs
-; EG: LSHR {{[* ]*}}{{T[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal
-; EG: 16
-define void @constant_constant_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
+; EG-DAG: LSHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal
+; EG-DAG: 16
+; EG-DAG: AND_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, literal
+; EG-DAG: AND_INT {{[* ]*}}[[ST_HI]].X, {{T[0-9]\.[XYZW]}}, literal
+; EG-DAG: 65535
+; EG-DAG: 65535
+define void @constant_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(2)* %in
%ext = zext <3 x i16> %ld to <3 x i32>
@@ -184,19 +191,20 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}constant_constant_sextload_v3i16_to_v3i32:
+; FUNC-LABEL: {{^}}constant_sextload_v3i16_to_v3i32:
; GCN: s_load_dwordx2
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9].[XYZW]}},
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9].[XYZW]}},
; v3i16 is naturally 8 byte aligned
-; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1
-; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; EG-DAG: ASHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal
+; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[PTR:T[0-9]\.[XYZW]]], 0, #1
+; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 4, #1
+; EG-DAG: ASHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, {{T[0-9]\.[XYZW]}}, 0.0, literal
; EG-DAG: 16
; EG-DAG: 16
-define void @constant_constant_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
+define void @constant_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(2)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(2)* %in
%ext = sext <3 x i16> %ld to <3 x i32>
@@ -204,20 +212,24 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}constant_constant_zextload_v4i16_to_v4i32:
+; FUNC-LABEL: {{^}}constant_zextload_v4i16_to_v4i32:
; GCN: s_load_dwordx2
; GCN-DAG: s_and_b32
; GCN-DAG: s_lshr_b32
; v4i16 is naturally 8 byte aligned
-; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}
+; EG: VTX_READ_64 [[LD:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1
+; TODO: This should use LD, but for some there are redundant MOVs
+; EG-DAG: BFE_UINT {{[* ]*}}[[ST]].Y, {{.*\.[XYZW]}}, literal
+; EG-DAG: BFE_UINT {{[* ]*}}[[ST]].W, {{.*\.[XYZW]}}, literal
; EG-DAG: 16
-; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal
-; EG-DAG: AND_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal
; EG-DAG: 16
-define void @constant_constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
+; EG-DAG: AND_INT {{[* ]*}}[[ST]].X, {{T[0-9]\.[XYZW]}}, literal
+; EG-DAG: AND_INT {{[* ]*}}[[ST]].Z, {{T[0-9]\.[XYZW]}}, literal
+; EG-DAG: 65535
+; EG-DAG: 65535
+define void @constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(2)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(2)* %in
%ext = zext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -230,13 +242,14 @@ define void @constant_constant_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %
; GCN-DAG: s_sext_i32_i16
; v4i16 is naturally 8 byte aligned
-; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}},
+; EG: VTX_READ_64 [[DST:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1
+; TODO: This should use LD, but for some there are redundant MOVs
+; EG-DAG: BFE_INT {{[* ]*}}[[ST]].X, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST]].Z, {{.*}}, 0.0, literal
; TODO: We should use ASHR instead of LSHR + BFE
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST]].W, {{.*}}, 0.0, literal
; EG-DAG: 16
; EG-DAG: 16
; EG-DAG: 16
@@ -254,24 +267,27 @@ define void @constant_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x
; GCN-DAG: s_lshr_b32
; v8i16 is naturally 16 byte aligned
-; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}},
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}},
+; EG: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1
+; TODO: These should use LSHR instead of BFE_UINT
+; TODO: This should use DST, but for some there are redundant MOVs
+; EG-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].Y, {{.*}}, literal
+; EG-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].W, {{.*}}, literal
+; EG-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].Y, {{.*}}, literal
+; EG-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].W, {{.*}}, literal
+; EG-DAG: AND_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, literal
+; EG-DAG: AND_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, literal
+; EG-DAG: AND_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, literal
+; EG-DAG: AND_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, literal
; EG-DAG: 16
; EG-DAG: 16
; EG-DAG: 16
; EG-DAG: 16
+; EG-DAG: 65535
+; EG-DAG: 65535
+; EG-DAG: 65535
+; EG-DAG: 65535
define void @constant_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(2)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(2)* %in
%ext = zext <8 x i16> %load to <8 x i32>
@@ -285,17 +301,19 @@ define void @constant_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x
; GCN-DAG: s_sext_i32_i16
; v8i16 is naturally 16 byte aligned
-; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; TODO: We should use ASHR instead of LSHR + BFE
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}},
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]+.[XYZW]}},
+; EG: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1
+; TODO: 4 of these should use ASHR instead of LSHR + BFE_INT
+; TODO: This should use DST, but for some there are redundant MOVs
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Y, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].W, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].Y, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].W, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, 0.0, literal
+; EG-DAG: BFE_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, 0.0, literal
; EG-DAG: 16
; EG-DAG: 16
; EG-DAG: 16
@@ -444,7 +462,7 @@ define void @constant_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
-; TODO: Why not 15 ?
+; TODO: These could be expanded earlier using ASHR 15
; EG: 31
define void @constant_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(2)* %in) #0 {
%a = load i16, i16 addrspace(2)* %in
@@ -468,7 +486,7 @@ define void @constant_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x
; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
-; TODO: Why not 15 ?
+; TODO: These could be expanded earlier using ASHR 15
; EG: 31
define void @constant_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(2)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(2)* %in
diff --git a/test/CodeGen/AMDGPU/load-global-i16.ll b/test/CodeGen/AMDGPU/load-global-i16.ll
index f398dd32e06de..7bd131e6516c0 100644
--- a/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -1,8 +1,8 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-SI,FUNC %s
; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-HSA,FUNC %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GCN-NOHSA,GCN-NOHSA-VI,FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=EGCM -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=EGCM -check-prefix=FUNC %s
; FIXME: r600 is broken because the bigger testcases spill and it's not implemented
@@ -10,7 +10,7 @@
; GCN-NOHSA: buffer_load_ushort v{{[0-9]+}}
; GCN-HSA: flat_load_ushort
-; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
define void @global_load_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
entry:
%ld = load i16, i16 addrspace(1)* %in
@@ -22,7 +22,7 @@ entry:
; GCN-NOHSA: buffer_load_dword v
; GCN-HSA: flat_load_dword v
-; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
define void @global_load_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
entry:
%ld = load <2 x i16>, <2 x i16> addrspace(1)* %in
@@ -34,8 +34,8 @@ entry:
; GCN-NOHSA: buffer_load_dwordx2 v
; GCN-HSA: flat_load_dwordx2 v
-; EG-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-; EG-DAG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4, #1
+; EGCM-DAG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM-DAG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 4, #1
define void @global_load_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(1)* %in
@@ -47,7 +47,7 @@ entry:
; GCN-NOHSA: buffer_load_dwordx2
; GCN-HSA: flat_load_dwordx2
-; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
define void @global_load_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
entry:
%ld = load <4 x i16>, <4 x i16> addrspace(1)* %in
@@ -59,7 +59,7 @@ entry:
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
define void @global_load_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) {
entry:
%ld = load <8 x i16>, <8 x i16> addrspace(1)* %in
@@ -74,8 +74,8 @@ entry:
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
define void @global_load_v16i16(<16 x i16> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) {
entry:
%ld = load <16 x i16>, <16 x i16> addrspace(1)* %in
@@ -90,7 +90,7 @@ entry:
; GCN-HSA: flat_load_ushort
; GCN-HSA: flat_store_dword
-; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
define void @global_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = zext i16 %a to i32
@@ -105,9 +105,9 @@ define void @global_zextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; GCN-HSA: flat_load_sshort
; GCN-HSA: flat_store_dword
-; EG: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1
-; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
-; EG: 16
+; EGCM: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1
+; EGCM: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
+; EGCM: 16
define void @global_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = sext i16 %a to i32
@@ -119,7 +119,7 @@ define void @global_sextload_i16_to_i32(i32 addrspace(1)* %out, i16 addrspace(1)
; GCN-NOHSA: buffer_load_ushort
; GCN-HSA: flat_load_ushort
-; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
define void @global_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = zext <1 x i16> %load to <1 x i32>
@@ -131,9 +131,9 @@ define void @global_zextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i
; GCN-NOHSA: buffer_load_sshort
; GCN-HSA: flat_load_sshort
-; EG: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1
-; EG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
-; EG: 16
+; EGCM: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], T{{[0-9]+}}.X, 0, #1
+; EGCM: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], 0.0, literal
+; EGCM: 16
define void @global_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = sext <1 x i16> %load to <1 x i32>
@@ -145,10 +145,9 @@ define void @global_sextload_v1i16_to_v1i32(<1 x i32> addrspace(1)* %out, <1 x i
; GCN-NOHSA: buffer_load_dword
; GCN-HSA: flat_load_dword
-; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
-; TODO: This should use DST, but for some there are redundant MOVs
-; EG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal
-; EG: 16
+; EGCM: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
+; EGCM: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, [[DST]], literal
+; EGCM: 16
define void @global_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = zext <2 x i16> %load to <2 x i32>
@@ -161,13 +160,14 @@ define void @global_zextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i
; GCN-HSA: flat_load_dword
-; EG: VTX_READ_32 [[DST:T[0-9]\.[XYZW]]], [[DST]], 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; TODO: We should also use ASHR instead of LSHR + BFE
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, 0.0, literal
-; EG-DAG: 16
-; EG-DAG: 16
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XY, {{T[0-9]\.[XYZW]}},
+; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; EGCM: VTX_READ_32 [[DST:T[0-9].[XYZW]]], [[DST]], 0, #1
+; TODO: This should use ASHR instead of LSHR + BFE
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].X, [[DST]], 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{PV.[XYZW]}}, 0.0, literal
+; EGCM-DAG: 16
+; EGCM-DAG: 16
define void @global_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = sext <2 x i16> %load to <2 x i32>
@@ -175,16 +175,22 @@ define void @global_sextload_v2i16_to_v2i32(<2 x i32> addrspace(1)* %out, <2 x i
ret void
}
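
The v2i16 checks above describe a packed-lane unpack: the low lane is a 16-bit extract at bit 0 and the high lane at bit 16, zero-filled for zext (AND_INT / BFE_UINT) and sign-filled for sext (BFE_INT). A small C sketch of that behaviour, purely illustrative and not taken from the test:

#include <stdint.h>

/* Unpack a dword holding <2 x i16> into zero- and sign-extended lanes. */
void unpack_v2i16(uint32_t packed, uint32_t zext_out[2], int32_t sext_out[2]) {
    zext_out[0] = packed & 0xffffu;                      /* AND_INT ..., 65535        */
    zext_out[1] = packed >> 16;                          /* LSHR / BFE_UINT ..., 16   */
    sext_out[0] = (int32_t)(int16_t)(packed & 0xffffu);  /* BFE_INT at offset 0       */
    sext_out[1] = (int32_t)(int16_t)(packed >> 16);      /* BFE_INT at offset 16; a   */
                                                         /* plain ASHR would also do, */
                                                         /* per the TODO above        */
}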
-; FUNC-LABEL: {{^}}global_global_zextload_v3i16_to_v3i32:
+; FUNC-LABEL: {{^}}global_zextload_v3i16_to_v3i32:
; GCN-NOHSA: buffer_load_dwordx2
; GCN-HSA: flat_load_dwordx2
-; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1
-; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1
+; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}}
+; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}},
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9]\.[XYZW]}},
+; EGCM-DAG: VTX_READ_32 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_16 [[DST_HI:T[0-9]\.[XYZW]]], {{T[0-9]\.[XYZW]}}, 4, #1
; TODO: This should use DST, but for some reason there are redundant MOVs
-; EG: LSHR {{[* ]*}}{{T[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal
-; EG: 16
-define void @global_global_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
+; EGCM: LSHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal
+; EGCM: 16
+; EGCM: AND_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, literal
+; EGCM: AND_INT {{[* ]*}}[[ST_HI]].X, [[DST_HI]], literal
+define void @global_zextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(1)* %in
%ext = zext <3 x i16> %ld to <3 x i32>
@@ -192,19 +198,23 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}global_global_sextload_v3i16_to_v3i32:
+; FUNC-LABEL: {{^}}global_sextload_v3i16_to_v3i32:
; GCN-NOHSA: buffer_load_dwordx2
; GCN-HSA: flat_load_dwordx2
-; EG-DAG: VTX_READ_32 [[DST_HI:T[0-9]\.[XYZW]]], [[DST_HI]], 0, #1
-; EG-DAG: VTX_READ_16 [[DST_LO:T[0-9]\.[XYZW]]], [[DST_LO]], 4, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; EG-DAG: ASHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{PV.[XYZW]}}, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, 0.0, literal
-; EG-DAG: 16
-; EG-DAG: 16
-define void @global_global_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
+; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}}
+; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].X, {{T[0-9]\.[XYZW]}},
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XY, {{T[0-9]\.[XYZW]}},
+; EGCM-DAG: VTX_READ_32 [[DST_LO:T[0-9]\.[XYZW]]], {{T[0-9].[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_16 [[DST_HI:T[0-9]\.[XYZW]]], {{T[0-9].[XYZW]}}, 4, #1
+; TODO: This should use DST, but for some reason there are redundant MOVs
+; EGCM-DAG: ASHR {{[* ]*}}[[ST_LO]].Y, {{T[0-9]\.[XYZW]}}, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{T[0-9]\.[XYZW]}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, [[DST_HI]], 0.0, literal
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+define void @global_sextload_v3i16_to_v3i32(<3 x i32> addrspace(1)* %out, <3 x i16> addrspace(1)* %in) {
entry:
%ld = load <3 x i16>, <3 x i16> addrspace(1)* %in
%ext = sext <3 x i16> %ld to <3 x i32>
@@ -212,19 +222,22 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}global_global_zextload_v4i16_to_v4i32:
+; FUNC-LABEL: {{^}}global_zextload_v4i16_to_v4i32:
; GCN-NOHSA: buffer_load_dwordx2
; GCN-HSA: flat_load_dwordx2
-; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal
-; EG-DAG: 16
-; EG-DAG: BFE_UINT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal
-; EG-DAG: AND_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{T[0-9].[XYZW]}}, literal
-; EG-DAG: 16
-define void @global_global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
+; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}},
+; EGCM: VTX_READ_64 [[DST:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1
+; TODO: This should use DST, but for some reason there are redundant MOVs
+; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST]].Y, {{.*}}, literal
+; EGCM-DAG: 16
+; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST]].W, {{.*}}, literal
+; EGCM-DAG: AND_INT {{[* ]*}}[[ST]].X, {{.*}}, literal
+; EGCM-DAG: AND_INT {{[* ]*}}[[ST]].Z, {{.*}}, literal
+; EGCM-DAG: 16
+define void @global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = zext <4 x i16> %load to <4 x i32>
store <4 x i32> %ext, <4 x i32> addrspace(1)* %out
@@ -236,17 +249,19 @@ define void @global_global_zextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out,
; GCN-HSA: flat_load_dwordx2
-; EG: VTX_READ_64 [[DST:T[0-9]\.XY]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
+; CM: MEM_RAT_CACHELESS STORE_DWORD [[ST:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; EG: MEM_RAT_CACHELESS STORE_RAW [[ST:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}},
+; EGCM: VTX_READ_64 [[DST:T[0-9]]].XY, {{T[0-9].[XYZW]}}, 0, #1
; TODO: We should use ASHR instead of LSHR + BFE
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: BFE_INT {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, 0.0, literal
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
+; TODO: This should use DST, but for some reason there are redundant MOVs
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].X, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].Y, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].Z, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST]].W, {{.*}}, 0.0, literal
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
define void @global_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = sext <4 x i16> %load to <4 x i32>
@@ -258,16 +273,29 @@ define void @global_sextload_v4i16_to_v4i32(<4 x i32> addrspace(1)* %out, <4 x i
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
+; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}},
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}},
+; EGCM: CF_END
+; EGCM: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1
+; TODO: These should use LSHR instead of BFE_UINT
+; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].Y, {{.*}}, literal
+; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_LO]].W, {{.*}}, literal
+; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].Y, {{.*}}, literal
+; EGCM-DAG: BFE_UINT {{[* ]*}}[[ST_HI]].W, {{.*}}, literal
+; EGCM-DAG: AND_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, literal
+; EGCM-DAG: AND_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, literal
+; EGCM-DAG: AND_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, literal
+; EGCM-DAG: AND_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, literal
+; EGCM-DAG: 65535
+; EGCM-DAG: 65535
+; EGCM-DAG: 65535
+; EGCM-DAG: 65535
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
define void @global_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = zext <8 x i16> %load to <8 x i32>
@@ -279,24 +307,29 @@ define void @global_zextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i
; GCN-NOHSA: buffer_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG: VTX_READ_128 [[DST:T[0-9]\.XYZW]], {{T[0-9].[XYZW]}}, 0, #1
-; TODO: These should use DST, but for some there are redundant MOVs
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: LSHR {{[* ]*}}T{{[0-9].[XYZW]}}, {{.*}}, literal
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
-; EG-DAG: 16
+; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_LO:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; CM-DAG: MEM_RAT_CACHELESS STORE_DWORD [[ST_HI:T[0-9]]], {{T[0-9]\.[XYZW]}}
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_LO:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}},
+; EG-DAG: MEM_RAT_CACHELESS STORE_RAW [[ST_HI:T[0-9]]].XYZW, {{T[0-9]\.[XYZW]}},
+; EGCM: CF_END
+; EGCM: VTX_READ_128 [[DST:T[0-9]]].XYZW, {{T[0-9].[XYZW]}}, 0, #1
+; TODO: These should use ASHR instead of LSHR + BFE_INT
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Y, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].W, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].Y, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].W, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].X, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_LO]].Z, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].X, {{.*}}, 0.0, literal
+; EGCM-DAG: BFE_INT {{[* ]*}}[[ST_HI]].Z, {{.*}}, 0.0, literal
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
+; EGCM-DAG: 16
define void @global_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = sext <8 x i16> %load to <8 x i32>
@@ -311,8 +344,8 @@ define void @global_sextload_v8i16_to_v8i32(<8 x i32> addrspace(1)* %out, <8 x i
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
define void @global_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = zext <16 x i16> %load to <16 x i32>
@@ -322,8 +355,8 @@ define void @global_zextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}global_sextload_v16i16_to_v16i32:
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
define void @global_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = sext <16 x i16> %load to <16 x i32>
@@ -342,10 +375,10 @@ define void @global_sextload_v16i16_to_v16i32(<16 x i32> addrspace(1)* %out, <16
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
define void @global_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = zext <32 x i16> %load to <32 x i32>
@@ -364,10 +397,10 @@ define void @global_zextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
define void @global_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = sext <32 x i16> %load to <32 x i32>
@@ -394,14 +427,14 @@ define void @global_sextload_v32i16_to_v32i32(<32 x i32> addrspace(1)* %out, <32
; GCN-HSA: flat_load_dwordx4
; GCN-HSA: flat_load_dwordx4
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1
define void @global_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = zext <64 x i16> %load to <64 x i32>
@@ -411,14 +444,14 @@ define void @global_zextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; FUNC-LABEL: {{^}}global_sextload_v64i16_to_v64i32:
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1
-; EG-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 0, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 16, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 32, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 48, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 64, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 80, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 96, #1
+; EGCM-DAG: VTX_READ_128 {{T[0-9]+\.XYZW}}, {{T[0-9]+.[XYZW]}}, 112, #1
define void @global_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64 x i16> addrspace(1)* %in) #0 {
%load = load <64 x i16>, <64 x i16> addrspace(1)* %in
%ext = sext <64 x i16> %load to <64 x i32>
@@ -434,8 +467,8 @@ define void @global_sextload_v64i16_to_v64i32(<64 x i32> addrspace(1)* %out, <64
; GCN-NOHSA: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
; GCN-HSA: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
-; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-; EG: MOV {{.*}}, 0.0
+; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: MOV {{.*}}, 0.0
define void @global_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = zext i16 %a to i64
@@ -458,10 +491,10 @@ define void @global_zextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; GCN-NOHSA: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]]
; GCN-HSA: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[LO]]:[[HI]]{{\]}}
-; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
-; TODO: Why not 15 ?
-; EG: 31
+; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
+; TODO: These could be expanded earlier using ASHR 15
+; EGCM: 31
define void @global_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
%a = load i16, i16 addrspace(1)* %in
%ext = sext i16 %a to i64
@@ -471,8 +504,8 @@ define void @global_sextload_i16_to_i64(i64 addrspace(1)* %out, i16 addrspace(1)
; FUNC-LABEL: {{^}}global_zextload_v1i16_to_v1i64:
-; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-; EG: MOV {{.*}}, 0.0
+; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: MOV {{.*}}, 0.0
define void @global_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = zext <1 x i16> %load to <1 x i64>
@@ -482,10 +515,10 @@ define void @global_zextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i
; FUNC-LABEL: {{^}}global_sextload_v1i16_to_v1i64:
-; EG: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
-; EG: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
-; TODO: Why not 15 ?
-; EG: 31
+; EGCM: VTX_READ_16 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: ASHR {{\**}} {{T[0-9]\.[XYZW]}}, {{.*}}, literal
+; TODO: These could be expanded earlier using ASHR 15
+; EGCM: 31
define void @global_sextload_v1i16_to_v1i64(<1 x i64> addrspace(1)* %out, <1 x i16> addrspace(1)* %in) #0 {
%load = load <1 x i16>, <1 x i16> addrspace(1)* %in
%ext = sext <1 x i16> %load to <1 x i64>
@@ -503,7 +536,7 @@ define void @global_zextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}global_sextload_v2i16_to_v2i64:
-; EG: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0, #1
define void @global_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
%load = load <2 x i16>, <2 x i16> addrspace(1)* %in
%ext = sext <2 x i16> %load to <2 x i64>
@@ -513,7 +546,7 @@ define void @global_sextload_v2i16_to_v2i64(<2 x i64> addrspace(1)* %out, <2 x i
; FUNC-LABEL: {{^}}global_zextload_v4i16_to_v4i64:
-; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
define void @global_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = zext <4 x i16> %load to <4 x i64>
@@ -523,7 +556,7 @@ define void @global_zextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}global_sextload_v4i16_to_v4i64:
-; EG: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_64 T{{[0-9]+}}.XY, T{{[0-9]+}}.X, 0, #1
define void @global_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
%load = load <4 x i16>, <4 x i16> addrspace(1)* %in
%ext = sext <4 x i16> %load to <4 x i64>
@@ -533,7 +566,7 @@ define void @global_sextload_v4i16_to_v4i64(<4 x i64> addrspace(1)* %out, <4 x i
; FUNC-LABEL: {{^}}global_zextload_v8i16_to_v8i64:
-; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
define void @global_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = zext <8 x i16> %load to <8 x i64>
@@ -543,7 +576,7 @@ define void @global_zextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; FUNC-LABEL: {{^}}global_sextload_v8i16_to_v8i64:
-; EG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
define void @global_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i16> addrspace(1)* %in) #0 {
%load = load <8 x i16>, <8 x i16> addrspace(1)* %in
%ext = sext <8 x i16> %load to <8 x i64>
@@ -553,8 +586,8 @@ define void @global_sextload_v8i16_to_v8i64(<8 x i64> addrspace(1)* %out, <8 x i
; FUNC-LABEL: {{^}}global_zextload_v16i16_to_v16i64:
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
define void @global_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = zext <16 x i16> %load to <16 x i64>
@@ -564,8 +597,8 @@ define void @global_zextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}global_sextload_v16i16_to_v16i64:
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
define void @global_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16 x i16> addrspace(1)* %in) #0 {
%load = load <16 x i16>, <16 x i16> addrspace(1)* %in
%ext = sext <16 x i16> %load to <16 x i64>
@@ -575,10 +608,10 @@ define void @global_sextload_v16i16_to_v16i64(<16 x i64> addrspace(1)* %out, <16
; FUNC-LABEL: {{^}}global_zextload_v32i16_to_v32i64:
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
define void @global_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = zext <32 x i16> %load to <32 x i64>
@@ -588,10 +621,10 @@ define void @global_zextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32
; FUNC-LABEL: {{^}}global_sextload_v32i16_to_v32i64:
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
-; EG-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 0, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 16, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 32, #1
+; EGCM-DAG: VTX_READ_128 T{{[0-9]+}}.XYZW, T{{[0-9]+}}.X, 48, #1
define void @global_sextload_v32i16_to_v32i64(<32 x i64> addrspace(1)* %out, <32 x i16> addrspace(1)* %in) #0 {
%load = load <32 x i16>, <32 x i16> addrspace(1)* %in
%ext = sext <32 x i16> %load to <32 x i64>
diff --git a/test/CodeGen/AMDGPU/min.ll b/test/CodeGen/AMDGPU/min.ll
index 5d64a152af3cb..13d56535303f6 100644
--- a/test/CodeGen/AMDGPU/min.ll
+++ b/test/CodeGen/AMDGPU/min.ll
@@ -1,10 +1,9 @@
-; RUN: llc -march=amdgcn < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
-
; FUNC-LABEL: {{^}}v_test_imin_sle_i32:
-; SI: v_min_i32_e32
+; GCN: v_min_i32_e32
; EG: MIN_INT
define void @v_test_imin_sle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
@@ -17,7 +16,7 @@ define void @v_test_imin_sle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
}
; FUNC-LABEL: {{^}}s_test_imin_sle_i32:
-; SI: s_min_i32
+; GCN: s_min_i32
; EG: MIN_INT
define void @s_test_imin_sle_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -28,7 +27,7 @@ define void @s_test_imin_sle_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
}
; FUNC-LABEL: {{^}}s_test_imin_sle_v1i32:
-; SI: s_min_i32
+; GCN: s_min_i32
; EG: MIN_INT
define void @s_test_imin_sle_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
@@ -39,10 +38,10 @@ define void @s_test_imin_sle_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <
}
; FUNC-LABEL: {{^}}s_test_imin_sle_v4i32:
-; SI: s_min_i32
-; SI: s_min_i32
-; SI: s_min_i32
-; SI: s_min_i32
+; GCN: s_min_i32
+; GCN: s_min_i32
+; GCN: s_min_i32
+; GCN: s_min_i32
; EG: MIN_INT
; EG: MIN_INT
@@ -56,11 +55,11 @@ define void @s_test_imin_sle_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, <
}
; FUNC-LABEL: {{^}}s_test_imin_sle_i8:
-; SI: s_load_dword
-; SI: s_load_dword
-; SI: s_sext_i32_i8
-; SI: s_sext_i32_i8
-; SI: s_min_i32
+; GCN: s_load_dword
+; GCN: s_load_dword
+; GCN: s_sext_i32_i8
+; GCN: s_sext_i32_i8
+; GCN: s_min_i32
define void @s_test_imin_sle_i8(i8 addrspace(1)* %out, i8 %a, i8 %b) nounwind {
%cmp = icmp sle i8 %a, %b
%val = select i1 %cmp, i8 %a, i8 %b
@@ -72,21 +71,26 @@ define void @s_test_imin_sle_i8(i8 addrspace(1)* %out, i8 %a, i8 %b) nounwind {
; extloads with mubuf instructions.
; FUNC-LABEL: {{^}}s_test_imin_sle_v4i8:
-; SI: buffer_load_sbyte
-; SI: buffer_load_sbyte
-; SI: buffer_load_sbyte
-; SI: buffer_load_sbyte
-; SI: buffer_load_sbyte
-; SI: buffer_load_sbyte
-; SI: buffer_load_sbyte
-; SI: buffer_load_sbyte
+; GCN: buffer_load_sbyte
+; GCN: buffer_load_sbyte
+; GCN: buffer_load_sbyte
+; GCN: buffer_load_sbyte
+; GCN: buffer_load_sbyte
+; GCN: buffer_load_sbyte
+; GCN: buffer_load_sbyte
+; GCN: buffer_load_sbyte
; SI: v_min_i32
; SI: v_min_i32
; SI: v_min_i32
; SI: v_min_i32
-; SI: s_endpgm
+; VI: v_min_i32
+; VI: v_min_i32
+; VI: v_min_i32
+; VI: v_min_i32
+
+; GCN: s_endpgm
; EG: MIN_INT
; EG: MIN_INT
@@ -117,7 +121,7 @@ define void @s_test_imin_sle_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, <
}
; FUNC-LABEL: @v_test_imin_slt_i32
-; SI: v_min_i32_e32
+; GCN: v_min_i32_e32
; EG: MIN_INT
define void @v_test_imin_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
@@ -130,7 +134,7 @@ define void @v_test_imin_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
}
; FUNC-LABEL: @s_test_imin_slt_i32
-; SI: s_min_i32
+; GCN: s_min_i32
; EG: MIN_INT
define void @s_test_imin_slt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -141,8 +145,8 @@ define void @s_test_imin_slt_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
}
; FUNC-LABEL: {{^}}s_test_imin_slt_v2i32:
-; SI: s_min_i32
-; SI: s_min_i32
+; GCN: s_min_i32
+; GCN: s_min_i32
; EG: MIN_INT
; EG: MIN_INT
@@ -154,7 +158,7 @@ define void @s_test_imin_slt_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <
}
; FUNC-LABEL: {{^}}s_test_imin_slt_imm_i32:
-; SI: s_min_i32 {{s[0-9]+}}, {{s[0-9]+}}, 8
+; GCN: s_min_i32 {{s[0-9]+}}, {{s[0-9]+}}, 8
; EG: MIN_INT {{.*}}literal.{{[xyzw]}}
define void @s_test_imin_slt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
@@ -165,7 +169,7 @@ define void @s_test_imin_slt_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
}
; FUNC-LABEL: {{^}}s_test_imin_sle_imm_i32:
-; SI: s_min_i32 {{s[0-9]+}}, {{s[0-9]+}}, 8
+; GCN: s_min_i32 {{s[0-9]+}}, {{s[0-9]+}}, 8
; EG: MIN_INT {{.*}}literal.{{[xyzw]}}
define void @s_test_imin_sle_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
@@ -176,7 +180,7 @@ define void @s_test_imin_sle_imm_i32(i32 addrspace(1)* %out, i32 %a) nounwind {
}
; FUNC-LABEL: @v_test_umin_ule_i32
-; SI: v_min_u32_e32
+; GCN: v_min_u32_e32
; EG: MIN_UINT
define void @v_test_umin_ule_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
@@ -189,11 +193,11 @@ define void @v_test_umin_ule_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
}
; FUNC-LABEL: @v_test_umin_ule_v3i32
-; SI: v_min_u32_e32
-; SI: v_min_u32_e32
-; SI: v_min_u32_e32
+; GCN: v_min_u32_e32
+; GCN: v_min_u32_e32
+; GCN: v_min_u32_e32
; SI-NOT: v_min_u32_e32
-; SI: s_endpgm
+; GCN: s_endpgm
; EG: MIN_UINT
; EG: MIN_UINT
@@ -207,7 +211,7 @@ define void @v_test_umin_ule_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrs
ret void
}
; FUNC-LABEL: @s_test_umin_ule_i32
-; SI: s_min_u32
+; GCN: s_min_u32
; EG: MIN_UINT
define void @s_test_umin_ule_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -218,7 +222,7 @@ define void @s_test_umin_ule_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
}
; FUNC-LABEL: @v_test_umin_ult_i32
-; SI: v_min_u32_e32
+; GCN: v_min_u32_e32
; EG: MIN_UINT
define void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
@@ -231,9 +235,9 @@ define void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr
}
; FUNC-LABEL: {{^}}v_test_umin_ult_i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: v_min_u32_e32
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: v_min_u32_e32
; EG: MIN_UINT
define void @v_test_umin_ult_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr) nounwind {
@@ -246,7 +250,7 @@ define void @v_test_umin_ult_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i
}
; FUNC-LABEL: @s_test_umin_ult_i32
-; SI: s_min_u32
+; GCN: s_min_u32
; EG: MIN_UINT
define void @s_test_umin_ult_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -258,10 +262,10 @@ define void @s_test_umin_ult_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwin
; FUNC-LABEL: @v_test_umin_ult_i32_multi_use
; SI-NOT: v_min
-; SI: v_cmp_lt_u32
+; GCN: v_cmp_lt_u32
; SI-NEXT: v_cndmask_b32
; SI-NOT: v_min
-; SI: s_endpgm
+; GCN: s_endpgm
; EG-NOT: MIN_UINT
define void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace(1)* %out1, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
@@ -274,9 +278,27 @@ define void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace
ret void
}
+; FUNC-LABEL: @v_test_umin_ult_i16_multi_use
+; GCN-NOT: v_min
+; GCN: v_cmp_lt_u32
+; GCN-NEXT: v_cndmask_b32
+; GCN-NOT: v_min
+; GCN: s_endpgm
+
+; EG-NOT: MIN_UINT
+define void @v_test_umin_ult_i16_multi_use(i16 addrspace(1)* %out0, i1 addrspace(1)* %out1, i16 addrspace(1)* %aptr, i16 addrspace(1)* %bptr) nounwind {
+ %a = load i16, i16 addrspace(1)* %aptr, align 2
+ %b = load i16, i16 addrspace(1)* %bptr, align 2
+ %cmp = icmp ult i16 %a, %b
+ %val = select i1 %cmp, i16 %a, i16 %b
+ store i16 %val, i16 addrspace(1)* %out0, align 2
+ store i1 %cmp, i1 addrspace(1)* %out1
+ ret void
+}
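+
+; The new i16 case mirrors the i32 multi-use test above. A hedged C sketch
+; (names are illustrative) of why no min is formed when the compare result
+; has a second use:
+;
+;   #include <stdbool.h>
+;   #include <stdint.h>
+;
+;   uint16_t umin_ult_multi_use(uint16_t a, uint16_t b, bool *cmp_out) {
+;       bool lt = a < b;     /* v_cmp_lt_u32                      */
+;       *cmp_out = lt;       /* the extra use keeps the i1 alive  */
+;       return lt ? a : b;   /* v_cndmask_b32 rather than v_min   */
+;   }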
+
; FUNC-LABEL: @s_test_umin_ult_v1i32
-; SI: s_min_u32
+; GCN: s_min_u32
; EG: MIN_UINT
define void @s_test_umin_ult_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <1 x i32> %b) nounwind {
@@ -287,14 +309,14 @@ define void @s_test_umin_ult_v1i32(<1 x i32> addrspace(1)* %out, <1 x i32> %a, <
}
; FUNC-LABEL: {{^}}s_test_umin_ult_v8i32:
-; SI: s_min_u32
-; SI: s_min_u32
-; SI: s_min_u32
-; SI: s_min_u32
-; SI: s_min_u32
-; SI: s_min_u32
-; SI: s_min_u32
-; SI: s_min_u32
+; GCN: s_min_u32
+; GCN: s_min_u32
+; GCN: s_min_u32
+; GCN: s_min_u32
+; GCN: s_min_u32
+; GCN: s_min_u32
+; GCN: s_min_u32
+; GCN: s_min_u32
; EG: MIN_UINT
; EG: MIN_UINT
@@ -312,14 +334,14 @@ define void @s_test_umin_ult_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, <
}
; FUNC-LABEL: {{^}}s_test_umin_ult_v8i16:
-; SI: v_min_u32
-; SI: v_min_u32
-; SI: v_min_u32
-; SI: v_min_u32
-; SI: v_min_u32
-; SI: v_min_u32
-; SI: v_min_u32
-; SI: v_min_u32
+; GCN: v_min_u32
+; GCN: v_min_u32
+; GCN: v_min_u32
+; GCN: v_min_u32
+; GCN: v_min_u32
+; GCN: v_min_u32
+; GCN: v_min_u32
+; GCN: v_min_u32
; EG: MIN_UINT
; EG: MIN_UINT
@@ -338,11 +360,11 @@ define void @s_test_umin_ult_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> %a, <
; Make sure redundant and removed
; FUNC-LABEL: {{^}}simplify_demanded_bits_test_umin_ult_i16:
-; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xc
-; SI: s_min_u32 [[MIN:s[0-9]+]], [[A]], [[B]]
-; SI: v_mov_b32_e32 [[VMIN:v[0-9]+]], [[MIN]]
-; SI: buffer_store_dword [[VMIN]]
+; GCN-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN: s_min_u32 [[MIN:s[0-9]+]], [[A]], [[B]]
+; GCN: v_mov_b32_e32 [[VMIN:v[0-9]+]], [[MIN]]
+; GCN: buffer_store_dword [[VMIN]]
; EG: MIN_UINT
define void @simplify_demanded_bits_test_umin_ult_i16(i32 addrspace(1)* %out, i16 zeroext %a, i16 zeroext %b) nounwind {
@@ -358,11 +380,11 @@ define void @simplify_demanded_bits_test_umin_ult_i16(i32 addrspace(1)* %out, i1
; Make sure redundant sign_extend_inreg removed.
; FUNC-LABEL: {{^}}simplify_demanded_bits_test_min_slt_i16:
-; SI-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, 0xc
-; SI: s_min_i32 [[MIN:s[0-9]+]], [[A]], [[B]]
-; SI: v_mov_b32_e32 [[VMIN:v[0-9]+]], [[MIN]]
-; SI: buffer_store_dword [[VMIN]]
+; GCN-DAG: s_load_dword [[A:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dword [[B:s[0-9]+]], {{s\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN: s_min_i32 [[MIN:s[0-9]+]], [[A]], [[B]]
+; GCN: v_mov_b32_e32 [[VMIN:v[0-9]+]], [[MIN]]
+; GCN: buffer_store_dword [[VMIN]]
; EG: MIN_INT
define void @simplify_demanded_bits_test_min_slt_i16(i32 addrspace(1)* %out, i16 signext %a, i16 signext %b) nounwind {
@@ -377,7 +399,7 @@ define void @simplify_demanded_bits_test_min_slt_i16(i32 addrspace(1)* %out, i16
}
; FUNC-LABEL: {{^}}s_test_imin_sle_i16:
-; SI: s_min_i32
+; GCN: s_min_i32
; EG: MIN_INT
define void @s_test_imin_sle_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwind {
@@ -389,7 +411,7 @@ define void @s_test_imin_sle_i16(i16 addrspace(1)* %out, i16 %a, i16 %b) nounwin
; 64 bit
; FUNC-LABEL: {{^}}test_umin_ult_i64
-; SI: s_endpgm
+; GCN: s_endpgm
; EG: MIN_UINT
; EG: MIN_UINT
@@ -401,7 +423,7 @@ define void @test_umin_ult_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
}
; FUNC-LABEL: {{^}}test_umin_ule_i64
-; SI: s_endpgm
+; GCN: s_endpgm
; EG: MIN_UINT
; EG: MIN_UINT
@@ -413,7 +435,7 @@ define void @test_umin_ule_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
}
; FUNC-LABEL: {{^}}test_imin_slt_i64
-; SI: s_endpgm
+; GCN: s_endpgm
; EG-DAG: MIN_UINT
; EG-DAG: MIN_INT
@@ -425,7 +447,7 @@ define void @test_imin_slt_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind
}
; FUNC-LABEL: {{^}}test_imin_sle_i64
-; SI: s_endpgm
+; GCN: s_endpgm
; EG-DAG: MIN_UINT
; EG-DAG: MIN_INT
diff --git a/test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll b/test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll
new file mode 100644
index 0000000000000..866a4a9191e27
--- /dev/null
+++ b/test/CodeGen/AMDGPU/r600-legalize-umax-bug.ll
@@ -0,0 +1,16 @@
+; RUN: llc -march=r600 -mcpu=cypress -start-after safe-stack %s -o - | FileCheck %s
+; Don't crash
+
+; CHECK: MAX_UINT
+define void @test(i64 addrspace(1)* %out) {
+bb:
+ store i64 2, i64 addrspace(1)* %out
+ %tmp = load i64, i64 addrspace(1)* %out
+ br label %jump
+
+jump: ; preds = %bb
+ %tmp1 = icmp ugt i64 %tmp, 4
+ %umax = select i1 %tmp1, i64 %tmp, i64 4
+ store i64 %umax, i64 addrspace(1)* %out
+ ret void
+}
diff --git a/test/CodeGen/AMDGPU/store-private.ll b/test/CodeGen/AMDGPU/store-private.ll
new file mode 100644
index 0000000000000..33d27f24e9cf6
--- /dev/null
+++ b/test/CodeGen/AMDGPU/store-private.ll
@@ -0,0 +1,743 @@
+; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}store_i1:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_byte
+define void @store_i1(i1 addrspace(0)* %out) {
+entry:
+ store i1 true, i1 addrspace(0)* %out
+ ret void
+}
+
+; i8 store
+; FUNC-LABEL: {{^}}store_i8:
+; EG: LSHR * [[ADDRESS:T[0-9]\.[XYZW]]], KC0[2].Y, literal.x
+; EG-NEXT: 2
+; EG: MOVA_INT * AR.x (MASKED)
+; EG: MOV [[OLD:T[0-9]\.[XYZW]]], {{.*}}AR.x
+
+; IG 0: Get the byte index and truncate the value
+; EG: AND_INT * T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x
+; EG: LSHL * T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x
+; EG-NEXT: 3(4.203895e-45)
+; EG: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], KC0[2].Z, literal.x
+; EG-NEXT: 255(3.573311e-43)
+
+; EG: NOT_INT
+; EG: AND_INT {{[\* ]*}}[[CLR_CHAN:T[0-9]\.[XYZW]]], {{.*}}[[OLD]]
+; EG: OR_INT * [[RES:T[0-9]\.[XYZW]]]
+; TODO: Is the reload necessary?
+; EG: MOVA_INT * AR.x (MASKED), [[ADDRESS]]
+; EG: MOV * T(0 + AR.x).X+, [[RES]]
+
+; SI: buffer_store_byte
+
+define void @store_i8(i8 addrspace(0)* %out, i8 %in) {
+entry:
+ store i8 %in, i8 addrspace(0)* %out
+ ret void
+}
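+
+; The EG/CM sequence checked above is a read-modify-write on the containing
+; dword. A rough C model of that sequence, with a plain uint32_t array
+; standing in for private memory (an assumption of the sketch, not something
+; the test defines):
+;
+;   #include <stdint.h>
+;
+;   void store_byte_rmw(uint32_t *scratch, uint32_t byte_addr, uint8_t value) {
+;       uint32_t old   = scratch[byte_addr >> 2];      /* LSHR addr, 2 + indexed load  */
+;       uint32_t shift = (byte_addr & 3u) << 3;        /* AND_INT 3, LSHL 3            */
+;       uint32_t bits  = (uint32_t)value << shift;     /* truncated value, shifted     */
+;       uint32_t mask  = ~(0xffu << shift);            /* NOT_INT of the byte mask     */
+;       scratch[byte_addr >> 2] = (old & mask) | bits; /* AND_INT + OR_INT, then store */
+;   }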
+
+; i16 store
+; FUNC-LABEL: {{^}}store_i16:
+; EG: LSHR * [[ADDRESS:T[0-9]\.[XYZW]]], KC0[2].Y, literal.x
+; EG-NEXT: 2
+; EG: MOVA_INT * AR.x (MASKED)
+; EG: MOV [[OLD:T[0-9]\.[XYZW]]], {{.*}}AR.x
+
+; IG 0: Get the byte index and truncate the value
+; EG: AND_INT * T{{[0-9]}}.[[BI_CHAN:[XYZW]]], KC0[2].Y, literal.x
+; EG: LSHL * T{{[0-9]}}.[[SHIFT_CHAN:[XYZW]]], PV.[[BI_CHAN]], literal.x
+; EG-NEXT: 3(4.203895e-45)
+; EG: AND_INT * T{{[0-9]}}.[[TRUNC_CHAN:[XYZW]]], KC0[2].Z, literal.x
+; EG-NEXT: 65535(9.183409e-41)
+
+; EG: NOT_INT
+; EG: AND_INT {{[\* ]*}}[[CLR_CHAN:T[0-9]\.[XYZW]]], {{.*}}[[OLD]]
+; EG: OR_INT * [[RES:T[0-9]\.[XYZW]]]
+; TODO: Is the reload necessary?
+; EG: MOVA_INT * AR.x (MASKED), [[ADDRESS]]
+; EG: MOV * T(0 + AR.x).X+, [[RES]]
+
+; SI: buffer_store_short
+define void @store_i16(i16 addrspace(0)* %out, i16 %in) {
+entry:
+ store i16 %in, i16 addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_i24:
+; SI: s_lshr_b32 s{{[0-9]+}}, s{{[0-9]+}}, 16
+; SI-DAG: buffer_store_byte
+; SI-DAG: buffer_store_short
+
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store can be eliminated
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store can be eliminated
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+define void @store_i24(i24 addrspace(0)* %out, i24 %in) {
+entry:
+ store i24 %in, i24 addrspace(0)* %out
+ ret void
+}
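+
+; On SI the i24 store above is split rather than widened. A tiny C model of
+; that split (little-endian byte order assumed; not part of the test itself):
+;
+;   #include <stdint.h>
+;
+;   void store_i24_split(uint8_t *p, uint32_t v) {
+;       p[0] = (uint8_t)v;           /* low byte of the buffer_store_short    */
+;       p[1] = (uint8_t)(v >> 8);    /* high byte of the buffer_store_short   */
+;       p[2] = (uint8_t)(v >> 16);   /* s_lshr_b32 by 16 + buffer_store_byte  */
+;   }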
+
+; FUNC-LABEL: {{^}}store_i25:
+; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, 0x1ffffff{{$}}
+; SI: v_mov_b32_e32 [[VAND:v[0-9]+]], [[AND]]
+; SI: buffer_store_dword [[VAND]]
+
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG-NOT: MOVA_INT
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM-NOT: MOVA_INT
+define void @store_i25(i25 addrspace(0)* %out, i25 %in) {
+entry:
+ store i25 %in, i25 addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v2i8:
+; v2i8 is naturally 2B aligned, treat as i16
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG-NOT: MOVA_INT
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM-NOT: MOVA_INT
+
+; SI: buffer_store_short
+define void @store_v2i8(<2 x i8> addrspace(0)* %out, <2 x i32> %in) {
+entry:
+ %0 = trunc <2 x i32> %in to <2 x i8>
+ store <2 x i8> %0, <2 x i8> addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v2i8_unaligned:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_byte
+define void @store_v2i8_unaligned(<2 x i8> addrspace(0)* %out, <2 x i32> %in) {
+entry:
+ %0 = trunc <2 x i32> %in to <2 x i8>
+ store <2 x i8> %0, <2 x i8> addrspace(0)* %out, align 1
+ ret void
+}
+
+
+; FUNC-LABEL: {{^}}store_v2i16:
+; v2i16 is naturally 4B aligned, treat as i32
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG-NOT: MOVA_INT
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM-NOT: MOVA_INT
+
+; SI: buffer_store_dword
+define void @store_v2i16(<2 x i16> addrspace(0)* %out, <2 x i32> %in) {
+entry:
+ %0 = trunc <2 x i32> %in to <2 x i16>
+ store <2 x i16> %0, <2 x i16> addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v2i16_unaligned:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_short
+; SI: buffer_store_short
+define void @store_v2i16_unaligned(<2 x i16> addrspace(0)* %out, <2 x i32> %in) {
+entry:
+ %0 = trunc <2 x i32> %in to <2 x i16>
+ store <2 x i16> %0, <2 x i16> addrspace(0)* %out, align 2
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i8:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG-NOT: MOVA_INT
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM-NOT: MOVA_INT
+
+; SI: buffer_store_dword
+define void @store_v4i8(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+ %0 = trunc <4 x i32> %in to <4 x i8>
+ store <4 x i8> %0, <4 x i8> addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i8_unaligned:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI-NOT: buffer_store_dword
+define void @store_v4i8_unaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+ %0 = trunc <4 x i32> %in to <4 x i8>
+ store <4 x i8> %0, <4 x i8> addrspace(0)* %out, align 1
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v8i8_unaligned:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI: buffer_store_byte
+; SI-NOT: buffer_store_dword
+define void @store_v8i8_unaligned(<8 x i8> addrspace(0)* %out, <8 x i32> %in) {
+entry:
+ %0 = trunc <8 x i32> %in to <8 x i8>
+ store <8 x i8> %0, <8 x i8> addrspace(0)* %out, align 1
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i8_halfaligned:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; TODO: This load and store cannot be eliminated,
+; they might be different locations
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_short
+; SI: buffer_store_short
+; SI-NOT: buffer_store_dword
+define void @store_v4i8_halfaligned(<4 x i8> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+ %0 = trunc <4 x i32> %in to <4 x i8>
+ store <4 x i8> %0, <4 x i8> addrspace(0)* %out, align 2
+ ret void
+}
+
+; floating-point store
+; FUNC-LABEL: {{^}}store_f32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_dword
+
+define void @store_f32(float addrspace(0)* %out, float %in) {
+ store float %in, float addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i16:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: Why not x2?
+; XSI: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4i16(<4 x i16> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+ %0 = trunc <4 x i32> %in to <4 x i16>
+ store <4 x i16> %0, <4 x i16> addrspace(0)* %out
+ ret void
+}
+
+; vec2 floating-point stores
+; FUNC-LABEL: {{^}}store_v2f32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: Why not x2?
+; XSI: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+
+define void @store_v2f32(<2 x float> addrspace(0)* %out, float %a, float %b) {
+entry:
+ %0 = insertelement <2 x float> <float 0.0, float 0.0>, float %a, i32 0
+ %1 = insertelement <2 x float> %0, float %b, i32 1
+ store <2 x float> %1, <2 x float> addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v3i32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x2?
+; XSI-DAG: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+
+define void @store_v3i32(<3 x i32> addrspace(0)* %out, <3 x i32> %a) nounwind {
+ store <3 x i32> %a, <3 x i32> addrspace(0)* %out, align 16
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4i32(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+ store <4 x i32> %in, <4 x i32> addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_v4i32_unaligned:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4i32_unaligned(<4 x i32> addrspace(0)* %out, <4 x i32> %in) {
+entry:
+ store <4 x i32> %in, <4 x i32> addrspace(0)* %out, align 4
+ ret void
+}
+
+; v4f32 store
+; FUNC-LABEL: {{^}}store_v4f32:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @store_v4f32(<4 x float> addrspace(0)* %out, <4 x float> addrspace(0)* %in) {
+ %1 = load <4 x float>, <4 x float> addrspace(0)* %in
+ store <4 x float> %1, <4 x float> addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_i64_i8:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_byte
+define void @store_i64_i8(i8 addrspace(0)* %out, i64 %in) {
+entry:
+ %0 = trunc i64 %in to i8
+ store i8 %0, i8 addrspace(0)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}store_i64_i16:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}{{T[0-9]+\.[XYZW]}}, T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; SI: buffer_store_short
+define void @store_i64_i16(i16 addrspace(0)* %out, i64 %in) {
+entry:
+ %0 = trunc i64 %in to i16
+ store i16 %0, i16 addrspace(0)* %out
+ ret void
+}
+
+; The stores in this function are combined by the optimizer to create a
+; 64-bit store with 32-bit alignment. This is legal, and the legalizer
+; should not try to split the 64-bit store back into two 32-bit stores.
+
+; FUNC-LABEL: {{^}}vecload2:
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x2?
+; XSI: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @vecload2(i32 addrspace(0)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
+entry:
+ %0 = load i32, i32 addrspace(2)* %mem, align 4
+ %arrayidx1.i = getelementptr inbounds i32, i32 addrspace(2)* %mem, i64 1
+ %1 = load i32, i32 addrspace(2)* %arrayidx1.i, align 4
+ store i32 %0, i32 addrspace(0)* %out, align 4
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 1
+ store i32 %1, i32 addrspace(0)* %arrayidx1, align 4
+ ret void
+}
+
+; When i128 was a legal type, this program generated 'cannot select' errors:
+
+; FUNC-LABEL: {{^}}"i128-const-store":
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; EG: MOVA_INT
+; EG: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+; CM: MOVA_INT
+; CM: MOV {{[\* ]*}}T(0 + AR.x).X+,
+
+; TODO: why not x4?
+; XSI: buffer_store_dwordx4
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+; SI: buffer_store_dword
+define void @i128-const-store(i32 addrspace(0)* %out) {
+entry:
+ store i32 1, i32 addrspace(0)* %out, align 4
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 1
+ store i32 1, i32 addrspace(0)* %arrayidx2, align 4
+ %arrayidx4 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 2
+ store i32 2, i32 addrspace(0)* %arrayidx4, align 4
+ %arrayidx6 = getelementptr inbounds i32, i32 addrspace(0)* %out, i64 3
+ store i32 2, i32 addrspace(0)* %arrayidx6, align 4
+ ret void
+}
+
+
+attributes #0 = { nounwind }
diff --git a/test/CodeGen/AVR/intrinsics/read_register.ll b/test/CodeGen/AVR/intrinsics/read_register.ll
new file mode 100644
index 0000000000000..3f28d1d3a9fe2
--- /dev/null
+++ b/test/CodeGen/AVR/intrinsics/read_register.ll
@@ -0,0 +1,17 @@
+; RUN: llc -O0 < %s -march=avr | FileCheck %s
+
+; CHECK-LABEL: foo
+define void @foo() {
+entry:
+ %val1 = call i16 @llvm.read_register.i16(metadata !0)
+ %val2 = call i16 @llvm.read_register.i16(metadata !1)
+ %val3 = call i8 @llvm.read_register.i8(metadata !2)
+ ret void
+}
+
+declare i8 @llvm.read_register.i8(metadata)
+declare i16 @llvm.read_register.i16(metadata)
+
+!0 = !{!"r28"}
+!1 = !{!"Z"}
+!2 = !{!"r0"}
diff --git a/test/CodeGen/WebAssembly/function-bitcasts.ll b/test/CodeGen/WebAssembly/function-bitcasts.ll
new file mode 100644
index 0000000000000..49980da6eb8fe
--- /dev/null
+++ b/test/CodeGen/WebAssembly/function-bitcasts.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
+
+; Test that function pointer casts are replaced with wrappers.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK-LABEL: test:
+; CHECK-NEXT: call .Lbitcast@FUNCTION{{$}}
+; CHECK-NEXT: call .Lbitcast.1@FUNCTION{{$}}
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0
+; CHECK-NEXT: call .Lbitcast.2@FUNCTION, $pop[[L0]]{{$}}
+; CHECK-NEXT: i32.call $drop=, .Lbitcast.3@FUNCTION{{$}}
+; CHECK-NEXT: call foo2@FUNCTION{{$}}
+; CHECK-NEXT: call foo3@FUNCTION{{$}}
+; CHECK-NEXT: .endfunc
+
+; CHECK-LABEL: .Lbitcast:
+; CHECK-NEXT: .local i32
+; CHECK-NEXT: call has_i32_arg@FUNCTION, $0{{$}}
+; CHECK-NEXT: .endfunc
+
+; CHECK-LABEL: .Lbitcast.1:
+; CHECK-NEXT: call $drop=, has_i32_ret@FUNCTION{{$}}
+; CHECK-NEXT: .endfunc
+
+; CHECK-LABEL: .Lbitcast.2:
+; CHECK-NEXT: .param i32
+; CHECK-NEXT: call foo0@FUNCTION{{$}}
+; CHECK-NEXT: .endfunc
+
+; CHECK-LABEL: .Lbitcast.3:
+; CHECK-NEXT: .result i32
+; CHECK-NEXT: .local i32
+; CHECK-NEXT: call foo1@FUNCTION{{$}}
+; CHECK-NEXT: copy_local $push0=, $0
+; CHECK-NEXT: .endfunc
+
+declare void @has_i32_arg(i32)
+declare i32 @has_i32_ret()
+
+declare void @foo0()
+declare void @foo1()
+declare void @foo2()
+declare void @foo3()
+
+define void @test() {
+entry:
+ call void bitcast (void (i32)* @has_i32_arg to void ()*)()
+ call void bitcast (i32 ()* @has_i32_ret to void ()*)()
+ call void bitcast (void ()* @foo0 to void (i32)*)(i32 0)
+ %t = call i32 bitcast (void ()* @foo1 to i32 ()*)()
+ call void bitcast (void ()* @foo2 to void ()*)()
+ call void @foo3()
+ ret void
+}
diff --git a/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll b/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
new file mode 100644
index 0000000000000..ef4318ec299b7
--- /dev/null
+++ b/test/CodeGen/WebAssembly/unsupported-function-bitcasts.ll
@@ -0,0 +1,26 @@
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
+
+; Test that function pointer casts that require conversions are not converted
+; to wrappers. In theory some conversions could be supported, but none are
+; currently implemented.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK-LABEL: test:
+; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
+; CHECK-NEXT: call has_i64_arg@FUNCTION, $pop[[L0]]{{$}}
+; CHECK-NEXT: i32.call $drop=, has_i64_ret@FUNCTION{{$}}
+; CHECK-NEXT: .endfunc
+
+; CHECK-NOT: .Lbitcast
+
+declare void @has_i64_arg(i64)
+declare i64 @has_i64_ret()
+
+define void @test() {
+entry:
+ call void bitcast (void (i64)* @has_i64_arg to void (i32)*)(i32 0)
+ %t = call i32 bitcast (i64 ()* @has_i64_ret to i32 ()*)()
+ ret void
+}
diff --git a/test/CodeGen/X86/avx2-arith.ll b/test/CodeGen/X86/avx2-arith.ll
index e1341624cad38..aec74424b9b27 100644
--- a/test/CodeGen/X86/avx2-arith.ll
+++ b/test/CodeGen/X86/avx2-arith.ll
@@ -142,17 +142,108 @@ define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone
ret <16 x i16> %x
}
-define <16 x i8> @mul-v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
+define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
+; X32-LABEL: mul_v16i8:
+; X32: ## BB#0:
+; X32-NEXT: vpmovsxbw %xmm1, %ymm1
+; X32-NEXT: vpmovsxbw %xmm0, %ymm0
+; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X32-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X32-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X32-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X32-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: vzeroupper
+; X32-NEXT: retl
+;
+; X64-LABEL: mul_v16i8:
+; X64: ## BB#0:
+; X64-NEXT: vpmovsxbw %xmm1, %ymm1
+; X64-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: vzeroupper
+; X64-NEXT: retq
%x = mul <16 x i8> %i, %j
ret <16 x i8> %x
}
-define <32 x i8> @mul-v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
+; X32-LABEL: mul_v32i8:
+; X32: ## BB#0:
+; X32-NEXT: vextracti128 $1, %ymm1, %xmm2
+; X32-NEXT: vpmovsxbw %xmm2, %ymm2
+; X32-NEXT: vextracti128 $1, %ymm0, %xmm3
+; X32-NEXT: vpmovsxbw %xmm3, %ymm3
+; X32-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; X32-NEXT: vextracti128 $1, %ymm2, %xmm3
+; X32-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X32-NEXT: vpshufb %xmm4, %xmm3, %xmm3
+; X32-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; X32-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X32-NEXT: vpmovsxbw %xmm1, %ymm1
+; X32-NEXT: vpmovsxbw %xmm0, %ymm0
+; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X32-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; X32-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; X32-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: mul_v32i8:
+; X64: ## BB#0:
+; X64-NEXT: vextracti128 $1, %ymm1, %xmm2
+; X64-NEXT: vpmovsxbw %xmm2, %ymm2
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm3
+; X64-NEXT: vpmovsxbw %xmm3, %ymm3
+; X64-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; X64-NEXT: vextracti128 $1, %ymm2, %xmm3
+; X64-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-NEXT: vpshufb %xmm4, %xmm3, %xmm3
+; X64-NEXT: vpshufb %xmm4, %xmm2, %xmm2
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X64-NEXT: vpmovsxbw %xmm1, %ymm1
+; X64-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-NEXT: vpmullw %ymm1, %ymm0, %ymm0
+; X64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-NEXT: vpshufb %xmm4, %xmm1, %xmm1
+; X64-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; X64-NEXT: retq
%x = mul <32 x i8> %i, %j
ret <32 x i8> %x
}
-define <4 x i64> @mul-v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
+; X32-LABEL: mul_v4i64:
+; X32: ## BB#0:
+; X32-NEXT: vpsrlq $32, %ymm0, %ymm2
+; X32-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
+; X32-NEXT: vpsrlq $32, %ymm1, %ymm3
+; X32-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
+; X32-NEXT: vpaddq %ymm2, %ymm3, %ymm2
+; X32-NEXT: vpsllq $32, %ymm2, %ymm2
+; X32-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; X32-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: mul_v4i64:
+; X64: ## BB#0:
+; X64-NEXT: vpsrlq $32, %ymm0, %ymm2
+; X64-NEXT: vpmuludq %ymm1, %ymm2, %ymm2
+; X64-NEXT: vpsrlq $32, %ymm1, %ymm3
+; X64-NEXT: vpmuludq %ymm3, %ymm0, %ymm3
+; X64-NEXT: vpaddq %ymm2, %ymm3, %ymm2
+; X64-NEXT: vpsllq $32, %ymm2, %ymm2
+; X64-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0
+; X64-NEXT: retq
%x = mul <4 x i64> %i, %j
ret <4 x i64> %x
}
@@ -291,8 +382,8 @@ define <8 x i32> @mul_const9(<8 x i32> %x) {
ret <8 x i32> %y
}
+; %x * 0x01010101
define <4 x i32> @mul_const10(<4 x i32> %x) {
- ; %x * 0x01010101
; X32-LABEL: mul_const10:
; X32: ## BB#0:
; X32-NEXT: vpbroadcastd LCPI22_0, %xmm1
@@ -308,8 +399,8 @@ define <4 x i32> @mul_const10(<4 x i32> %x) {
ret <4 x i32> %m
}
+; %x * 0x80808080
define <4 x i32> @mul_const11(<4 x i32> %x) {
- ; %x * 0x80808080
; X32-LABEL: mul_const11:
; X32: ## BB#0:
; X32-NEXT: vpbroadcastd LCPI23_0, %xmm1
diff --git a/test/CodeGen/X86/avx512-bugfix-23634.ll b/test/CodeGen/X86/avx512-bugfix-23634.ll
index 0dcfb7c169f31..e66eefdb8e9f1 100644
--- a/test/CodeGen/X86/avx512-bugfix-23634.ll
+++ b/test/CodeGen/X86/avx512-bugfix-23634.ll
@@ -15,7 +15,7 @@ define void @f_fu(float* %ret, float* %aa, float %b) {
; CHECK-NEXT: vpsrad $1, %zmm2, %zmm2
; CHECK-NEXT: movw $-21846, %ax ## imm = 0xAAAA
; CHECK-NEXT: kmovw %eax, %k1
-; CHECK-NEXT: vpblendmd {{.*}}(%rip), %zmm1, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa32 {{.*}}(%rip), %zmm1 {%k1}
; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vcvtdq2ps %zmm0, %zmm0
diff --git a/test/CodeGen/X86/avx512-calling-conv.ll b/test/CodeGen/X86/avx512-calling-conv.ll
index 532678ae72fa3..1a91bc1dee9a7 100644
--- a/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/test/CodeGen/X86/avx512-calling-conv.ll
@@ -25,8 +25,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16 x i1>%b) {
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1 {%k1}
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: retq
;
@@ -48,8 +47,7 @@ define <16 x i1> @test2(<16 x i1>%a, <16 x i1>%b) {
; KNL_X32-NEXT: vpslld $31, %zmm0, %zmm0
; KNL_X32-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL_X32-NEXT: vptestmd %zmm1, %zmm1, %k1 {%k1}
-; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL_X32-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL_X32-NEXT: vpmovdb %zmm0, %xmm0
; KNL_X32-NEXT: retl
%c = and <16 x i1>%a, %b
@@ -65,8 +63,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 {%k1}
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; KNL-NEXT: retq
;
@@ -88,8 +85,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; KNL_X32-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL_X32-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL_X32-NEXT: vptestmq %zmm1, %zmm1, %k1 {%k1}
-; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL_X32-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL_X32-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL_X32-NEXT: vpmovqw %zmm0, %xmm0
; KNL_X32-NEXT: retl
%c = and <8 x i1>%a, %b
@@ -180,8 +176,7 @@ define <16 x i32> @test6(<16 x i32>%a, <16 x i32>%b) {
; KNL-NEXT: Lcfi1:
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: callq _func16xi1
; KNL-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -210,8 +205,7 @@ define <16 x i32> @test6(<16 x i32>%a, <16 x i32>%b) {
; KNL_X32-NEXT: Lcfi1:
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
-; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL_X32-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL_X32-NEXT: vpmovdb %zmm0, %xmm0
; KNL_X32-NEXT: calll _func16xi1
; KNL_X32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
@@ -285,8 +279,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL-NEXT: movb $85, %al
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; KNL-NEXT: popq %rax
; KNL-NEXT: retq
@@ -322,8 +315,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL_X32-NEXT: movb $85, %al
; KNL_X32-NEXT: kmovw %eax, %k1
; KNL_X32-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
-; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL_X32-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL_X32-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL_X32-NEXT: vpmovqw %zmm0, %xmm0
; KNL_X32-NEXT: addl $12, %esp
; KNL_X32-NEXT: retl
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index c2eb19d166505..5e50a3aef2f29 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -740,8 +740,7 @@ define <16 x float> @sitofp_16i1_float(<16 x i32> %a) {
; KNL: ## BB#0:
; KNL-NEXT: vpxord %zmm1, %zmm1, %zmm1
; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vcvtdq2ps %zmm0, %zmm0
; KNL-NEXT: retq
;
@@ -805,11 +804,10 @@ define <16 x double> @sitofp_16i1_double(<16 x double> %a) {
; KNL-NEXT: vpxord %zmm2, %zmm2, %zmm2
; KNL-NEXT: vcmpltpd %zmm1, %zmm2, %k1
; KNL-NEXT: vcmpltpd %zmm0, %zmm2, %k2
-; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k2} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vcvtdq2pd %ymm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm1 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm1, %ymm1
; KNL-NEXT: vcvtdq2pd %ymm1, %zmm1
; KNL-NEXT: retq
@@ -834,8 +832,7 @@ define <8 x double> @sitofp_8i1_double(<8 x double> %a) {
; KNL: ## BB#0:
; KNL-NEXT: vpxord %zmm1, %zmm1, %zmm1
; KNL-NEXT: vcmpltpd %zmm0, %zmm1, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vcvtdq2pd %ymm0, %zmm0
; KNL-NEXT: retq
@@ -858,8 +855,7 @@ define <8 x float> @sitofp_8i1_float(<8 x float> %a) {
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vxorps %ymm1, %ymm1, %ymm1
; KNL-NEXT: vcmpltps %zmm0, %zmm1, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vcvtdq2ps %ymm0, %ymm0
; KNL-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll
index 32bd0804d637f..03d6127ae5dce 100644
--- a/test/CodeGen/X86/avx512-ext.ll
+++ b/test/CodeGen/X86/avx512-ext.ll
@@ -345,9 +345,9 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
-; KNL-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; KNL-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
@@ -369,9 +369,9 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
-; KNL-NEXT: vpmovsxbd (%rdi), %ymm0
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: vpmovsxbd (%rdi), %ymm1
+; KNL-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
@@ -704,9 +704,9 @@ define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
-; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; KNL-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
@@ -728,9 +728,9 @@ define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounw
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
-; KNL-NEXT: vpmovsxwd (%rdi), %ymm0
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: vpmovsxwd (%rdi), %ymm1
+; KNL-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
@@ -762,9 +762,9 @@ define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; KNL-NEXT: vpxor %ymm0, %ymm0, %ymm0
+; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
@@ -1457,8 +1457,7 @@ define <16 x i32> @sext_16i1_16i32(<16 x i32> %a1, <16 x i32> %a2) nounwind {
; KNL-LABEL: sext_16i1_16i32:
; KNL: ## BB#0:
; KNL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: sext_16i1_16i32:
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 26d14fa0840f5..cb8ed0e59a3a2 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -365,11 +365,10 @@ define i16 @test16(i1 *%addr, i16 %a) {
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: kmovw %esi, %k2
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z}
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
-; KNL-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; KNL-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -402,11 +401,10 @@ define i8 @test17(i1 *%addr, i8 %a) {
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: kmovw %esi, %k2
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z}
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
-; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -1242,30 +1240,29 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL-NEXT: vpextrd $1, %xmm0, %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm2 {%k2} {z}
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; KNL-NEXT: vmovd %xmm0, %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm3 {%k2} {z}
-; KNL-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,8,2,3,4,5,6,7]
-; KNL-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
-; KNL-NEXT: vpsllq $63, %zmm4, %zmm2
-; KNL-NEXT: vptestmq %zmm2, %zmm2, %k2
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm2 {%k2} {z}
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm3 {%k1} {z}
-; KNL-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,8,3,4,5,6,7]
-; KNL-NEXT: vpermi2q %zmm3, %zmm2, %zmm4
-; KNL-NEXT: vpsllq $63, %zmm4, %zmm2
-; KNL-NEXT: vptestmq %zmm2, %zmm2, %k1
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm2 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
+; KNL-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
+; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k2
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
+; KNL-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
+; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vpextrd $3, %xmm0, %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; KNL-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,2,8,4,5,6,7]
-; KNL-NEXT: vpermi2q %zmm0, %zmm2, %zmm1
-; KNL-NEXT: vpsllq $63, %zmm1, %zmm0
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
+; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: retq
@@ -1306,11 +1303,10 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; KNL-NEXT: vmovq %xmm0, %rax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z}
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
-; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index d48f63536e0e4..b127585dc87bc 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -344,8 +344,7 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; KNL-NEXT: LBB17_1:
; KNL-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; KNL-NEXT: LBB17_3:
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: retq
;
@@ -382,8 +381,7 @@ define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; KNL-NEXT: LBB18_3:
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: retq
;
@@ -472,8 +470,7 @@ define <16 x i1> @test15(i32 %x, i32 %y) {
; KNL-NEXT: movw $1, %cx
; KNL-NEXT: cmovgw %ax, %cx
; KNL-NEXT: kmovw %ecx, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: retq
;
@@ -510,28 +507,27 @@ define <64 x i8> @test16(i64 %x) {
; KNL-NEXT: movl %edi, (%rsp)
; KNL-NEXT: shrq $32, %rdi
; KNL-NEXT: movl %edi, {{[0-9]+}}(%rsp)
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; KNL-NEXT: kmovw (%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpmovdb %zmm0, %xmm0
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; KNL-NEXT: vpmovdb %zmm1, %xmm1
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm2 {%k2} {z}
-; KNL-NEXT: vpmovdb %zmm2, %xmm2
-; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm2
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; KNL-NEXT: movl $1, %eax
-; KNL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; KNL-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
+; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm1, %xmm1
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k2} {z}
-; KNL-NEXT: vpmovdb %zmm0, %xmm0
-; KNL-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm1
-; KNL-NEXT: vpsllw $7, %ymm2, %ymm0
-; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; KNL-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; KNL-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
+; KNL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; KNL-NEXT: vpmovdb %zmm2, %xmm2
+; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
@@ -574,30 +570,29 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; KNL-NEXT: movl %edi, (%rsp)
; KNL-NEXT: shrq $32, %rdi
; KNL-NEXT: movl %edi, {{[0-9]+}}(%rsp)
-; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
; KNL-NEXT: kmovw (%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm2 {%k2} {z}
-; KNL-NEXT: vpmovdb %zmm2, %xmm2
-; KNL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vpmovdb %zmm1, %xmm1
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; KNL-NEXT: xorl %eax, %eax
; KNL-NEXT: cmpl %edx, %esi
; KNL-NEXT: setg %al
; KNL-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; KNL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; KNL-NEXT: vpsllw $7, %ymm0, %ymm0
; KNL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; KNL-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; KNL-NEXT: vpcmpgtb %ymm0, %ymm2, %ymm0
+; KNL-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; KNL-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; KNL-NEXT: kmovw {{[0-9]+}}(%rsp), %k2
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm2 {%k1} {z}
-; KNL-NEXT: vpmovdb %zmm2, %xmm2
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; KNL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; KNL-NEXT: vpmovdb %zmm2, %xmm2
+; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
@@ -635,18 +630,17 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kshiftlw $6, %k2, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z}
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm2 {%k2} {z}
-; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,5,8,7]
-; KNL-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
-; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
-; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
+; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: kshiftrw $1, %k1, %k1
; KNL-NEXT: kshiftlw $7, %k0, %k0
; KNL-NEXT: korw %k0, %k1, %k1
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; KNL-NEXT: retq
;
@@ -1387,8 +1381,7 @@ define <8 x i64> @load_8i1(<8 x i1>* %a) {
; KNL: ## BB#0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: load_8i1:
@@ -1405,8 +1398,7 @@ define <16 x i32> @load_16i1(<16 x i1>* %a) {
; KNL-LABEL: load_16i1:
; KNL: ## BB#0:
; KNL-NEXT: kmovw (%rdi), %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: retq
;
; SKX-LABEL: load_16i1:
@@ -1424,8 +1416,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
; KNL: ## BB#0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; KNL-NEXT: retq
;
@@ -1444,8 +1435,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
; KNL: ## BB#0:
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; KNL-NEXT: retq
@@ -1465,10 +1455,9 @@ define <32 x i16> @load_32i1(<32 x i1>* %a) {
; KNL: ## BB#0:
; KNL-NEXT: kmovw (%rdi), %k1
; KNL-NEXT: kmovw 2(%rdi), %k2
-; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; KNL-NEXT: vpmovdw %zmm1, %ymm1
; KNL-NEXT: retq
;
@@ -1489,17 +1478,16 @@ define <64 x i8> @load_64i1(<64 x i1>* %a) {
; KNL-NEXT: kmovw 2(%rdi), %k2
; KNL-NEXT: kmovw 4(%rdi), %k3
; KNL-NEXT: kmovw 6(%rdi), %k4
-; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm2 {%k2} {z}
-; KNL-NEXT: vpmovdb %zmm2, %xmm2
-; KNL-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm2 {%k3} {z}
-; KNL-NEXT: vpmovdb %zmm2, %xmm2
-; KNL-NEXT: vmovdqa32 %zmm1, %zmm1 {%k4} {z}
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
; KNL-NEXT: vpmovdb %zmm1, %xmm1
-; KNL-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; KNL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k3} {z}
+; KNL-NEXT: vpmovdb %zmm1, %xmm1
+; KNL-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 {%k4} {z}
+; KNL-NEXT: vpmovdb %zmm2, %xmm2
+; KNL-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; KNL-NEXT: retq
;
; SKX-LABEL: load_64i1:
diff --git a/test/CodeGen/X86/avx512-mov.ll b/test/CodeGen/X86/avx512-mov.ll
index 2a0de05608b46..9234ae838cffd 100644
--- a/test/CodeGen/X86/avx512-mov.ll
+++ b/test/CodeGen/X86/avx512-mov.ll
@@ -313,7 +313,7 @@ define <16 x i32> @test32(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmd (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x64,0x07]
+; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x49,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -327,7 +327,7 @@ define <16 x i32> @test33(i8 * %addr, <16 x i32> %old, <16 x i32> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x48,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmd (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x64,0x07]
+; CHECK-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x49,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i32>*
@@ -369,7 +369,7 @@ define <8 x i64> @test36(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmq (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x64,0x07]
+; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -383,7 +383,7 @@ define <8 x i64> @test37(i8 * %addr, <8 x i64> %old, <8 x i64> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x48,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmq (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x64,0x07]
+; CHECK-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x49,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i64>*
@@ -426,7 +426,7 @@ define <16 x float> @test40(i8 * %addr, <16 x float> %old, <16 x float> %mask1)
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vcmpordps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x07]
; CHECK-NEXT: vcmpneqps %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x49,0xc2,0xca,0x04]
-; CHECK-NEXT: vblendmps (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x65,0x07]
+; CHECK-NEXT: vmovaps (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -441,7 +441,7 @@ define <16 x float> @test41(i8 * %addr, <16 x float> %old, <16 x float> %mask1)
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vcmpordps %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0x74,0x48,0xc2,0xca,0x07]
; CHECK-NEXT: vcmpneqps %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x49,0xc2,0xca,0x04]
-; CHECK-NEXT: vblendmps (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x65,0x07]
+; CHECK-NEXT: vmovups (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x49,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = fcmp one <16 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x float>*
@@ -486,7 +486,7 @@ define <8 x double> @test44(i8 * %addr, <8 x double> %old, <8 x double> %mask1)
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vcmpordpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x07]
; CHECK-NEXT: vcmpneqpd %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0xc2,0xca,0x04]
-; CHECK-NEXT: vblendmpd (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x65,0x07]
+; CHECK-NEXT: vmovapd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
@@ -501,7 +501,7 @@ define <8 x double> @test45(i8 * %addr, <8 x double> %old, <8 x double> %mask1)
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2 ## encoding: [0x62,0xf1,0x6d,0x48,0xef,0xd2]
; CHECK-NEXT: vcmpordpd %zmm2, %zmm1, %k1 ## encoding: [0x62,0xf1,0xf5,0x48,0xc2,0xca,0x07]
; CHECK-NEXT: vcmpneqpd %zmm2, %zmm1, %k1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x49,0xc2,0xca,0x04]
-; CHECK-NEXT: vblendmpd (%rdi), %zmm0, %zmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x49,0x65,0x07]
+; CHECK-NEXT: vmovupd (%rdi), %zmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x49,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = fcmp one <8 x double> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x double>*
diff --git a/test/CodeGen/X86/avx512-regcall-NoMask.ll b/test/CodeGen/X86/avx512-regcall-NoMask.ll
index ce8fca036c91b..a29c1e4628a12 100644
--- a/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -325,11 +325,13 @@ define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) {
}
; X32-LABEL: test_argRet128Vector:
-; X32: vpblend{{.*}} %xmm0, %xmm1, %xmm0
+; X32: vmovdqa{{.*}} %xmm0, %xmm1
+; X32: vmovdqa{{.*}} %xmm1, %xmm0
; X32: ret{{.*}}
; WIN64-LABEL: test_argRet128Vector:
-; WIN64: vpblend{{.*}} %xmm0, %xmm1, %xmm0
+; WIN64: vmovdqa{{.*}} %xmm0, %xmm1
+; WIN64: vmovdqa{{.*}} %xmm1, %xmm0
; WIN64: ret{{.*}}
; Test regcall when receiving/returning 128 bit vector
@@ -341,13 +343,13 @@ define x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i32> %a, <4 x i32> %b)
; X32-LABEL: test_CallargRet128Vector:
; X32: vmov{{.*}} %xmm0, {{%xmm([0-7])}}
; X32: call{{.*}} {{.*}}test_argRet128Vector
-; X32: vpblend{{.*}} {{%xmm([0-7])}}, %xmm0, %xmm0
+; X32: vmovdqa{{.*}} {{%xmm([0-7])}}, %xmm0
; X32: ret{{.*}}
; WIN64-LABEL: test_CallargRet128Vector:
; WIN64: vmov{{.*}} %xmm0, {{%xmm([0-9]+)}}
; WIN64: call{{.*}} {{.*}}test_argRet128Vector
-; WIN64: vpblend{{.*}} {{%xmm([0-9]+)}}, %xmm0, %xmm0
+; WIN64: vmovdqa{{.*}} {{%xmm([0-9]+)}}, %xmm0
; WIN64: ret{{.*}}
; Test regcall when passing/retrieving 128 bit vector
@@ -358,11 +360,13 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) {
}
; X32-LABEL: test_argRet256Vector:
-; X32: vpblend{{.*}} %ymm0, %ymm1, %ymm0
+; X32: vmovdqa{{.*}} %ymm0, %ymm1
+; X32: vmovdqa{{.*}} %ymm1, %ymm0
; X32: ret{{.*}}
; WIN64-LABEL: test_argRet256Vector:
-; WIN64: vpblend{{.*}} %ymm0, %ymm1, %ymm0
+; WIN64: vmovdqa{{.*}} %ymm0, %ymm1
+; WIN64: vmovdqa{{.*}} %ymm1, %ymm0
; WIN64: ret{{.*}}
; Test regcall when receiving/returning 256 bit vector
@@ -374,13 +378,13 @@ define x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i32> %a, <8 x i32> %b)
; X32-LABEL: test_CallargRet256Vector:
; X32: vmov{{.*}} %ymm0, %ymm1
; X32: call{{.*}} {{.*}}test_argRet256Vector
-; X32: vpblend{{.*}} %ymm1, %ymm0, %ymm0
+; X32: vmovdqa{{.*}} %ymm1, %ymm0
; X32: ret{{.*}}
; WIN64-LABEL: test_CallargRet256Vector:
; WIN64: vmov{{.*}} %ymm0, %ymm1
; WIN64: call{{.*}} {{.*}}test_argRet256Vector
-; WIN64: vpblend{{.*}} %ymm1, %ymm0, %ymm0
+; WIN64: vmovdqa{{.*}} %ymm1, %ymm0
; WIN64: ret{{.*}}
; Test regcall when passing/retrieving 256 bit vector
@@ -391,11 +395,13 @@ define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) {
}
; X32-LABEL: test_argRet512Vector:
-; X32: vpblend{{.*}} %zmm0, %zmm1, %zmm0
+; X32: vmovdqa{{.*}} %zmm0, %zmm1
+; X32: vmovdqa{{.*}} %zmm1, %zmm0
; X32: ret{{.*}}
; WIN64-LABEL: test_argRet512Vector:
-; WIN64: vpblend{{.*}} %zmm0, %zmm1, %zmm0
+; WIN64: vmovdqa{{.*}} %zmm0, %zmm1
+; WIN64: vmovdqa{{.*}} %zmm1, %zmm0
; WIN64: ret{{.*}}
; Test regcall when receiving/returning 512 bit vector
@@ -407,13 +413,13 @@ define x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i32> %a, <16 x i32>
; X32-LABEL: test_CallargRet512Vector:
; X32: vmov{{.*}} %zmm0, %zmm1
; X32: call{{.*}} {{.*}}test_argRet512Vector
-; X32: vpblend{{.*}} %zmm1, %zmm0, %zmm0
+; X32: vmovdqa{{.*}} %zmm1, %zmm0
; X32: ret{{.*}}
; WIN64-LABEL: test_CallargRet512Vector:
; WIN64: vmov{{.*}} %zmm0, %zmm1
; WIN64: call{{.*}} {{.*}}test_argRet512Vector
-; WIN64: vpblend{{.*}} %zmm1, %zmm0, %zmm0
+; WIN64: vmovdqa{{.*}} %zmm1, %zmm0
; WIN64: ret{{.*}}
; Test regcall when passing/retrieving 512 bit vector
diff --git a/test/CodeGen/X86/avx512-vbroadcast.ll b/test/CodeGen/X86/avx512-vbroadcast.ll
index 840239b9011a4..1991ee4f33768 100644
--- a/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -218,8 +218,7 @@ define <16 x i32> @test_vbroadcast() {
; ALL: # BB#0: # %entry
; ALL-NEXT: vpxord %zmm0, %zmm0, %zmm0
; ALL-NEXT: vcmpunordps %zmm0, %zmm0, %k1
-; ALL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; ALL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; ALL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; ALL-NEXT: knotw %k1, %k1
; ALL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index bd269ea87a350..361ee1ddbf9df 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -6,7 +6,8 @@ define <16 x float> @test1(<16 x float> %x, <16 x float> %y) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %zmm1, %zmm0, %k1
-; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = fcmp ole <16 x float> %x, %y
%max = select <16 x i1> %mask, <16 x float> %x, <16 x float> %y
@@ -17,7 +18,8 @@ define <8 x double> @test2(<8 x double> %x, <8 x double> %y) nounwind {
; CHECK-LABEL: test2:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %zmm1, %zmm0, %k1
-; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovapd %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = fcmp ole <8 x double> %x, %y
%max = select <8 x i1> %mask, <8 x double> %x, <8 x double> %y
@@ -28,7 +30,8 @@ define <16 x i32> @test3(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %yp) nounwin
; CHECK-LABEL: test3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %yp, align 4
%mask = icmp eq <16 x i32> %x, %y
@@ -40,7 +43,8 @@ define <16 x i32> @test4_unsigned(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1)
; CHECK-LABEL: test4_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp uge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
@@ -51,7 +55,8 @@ define <8 x i64> @test5(<8 x i64> %x, <8 x i64> %y) nounwind {
; CHECK-LABEL: test5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp eq <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x, <8 x i64> %y
@@ -62,7 +67,8 @@ define <8 x i64> @test6_unsigned(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1) noun
; CHECK-LABEL: test6_unsigned:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpblendmq %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp ugt <8 x i64> %x, %y
%max = select <8 x i1> %mask, <8 x i64> %x1, <8 x i64> %y
@@ -81,7 +87,8 @@ define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
; SKX: ## BB#0:
; SKX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltps %xmm2, %xmm0, %k1
-; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; SKX-NEXT: vmovaps %xmm0, %xmm1 {%k1}
+; SKX-NEXT: vmovaps %xmm1, %xmm0
; SKX-NEXT: retq
%mask = fcmp olt <4 x float> %a, zeroinitializer
@@ -101,7 +108,8 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
; SKX: ## BB#0:
; SKX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; SKX-NEXT: vcmpltpd %xmm2, %xmm0, %k1
-; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
+; SKX-NEXT: vmovapd %xmm0, %xmm1 {%k1}
+; SKX-NEXT: vmovapd %xmm1, %xmm0
; SKX-NEXT: retq
%mask = fcmp olt <2 x double> %a, zeroinitializer
%c = select <2 x i1>%mask, <2 x double>%a, <2 x double>%b
@@ -114,14 +122,15 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
-; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vmovdqa %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
-; SKX-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; SKX-NEXT: vmovdqa %ymm1, %ymm0
; SKX-NEXT: retq
%mask = icmp eq <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %y
@@ -134,14 +143,15 @@ define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vcmpeqps %zmm1, %zmm0, %k1
-; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vmovaps %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test10:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqps %ymm1, %ymm0, %k1
-; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vmovaps %ymm0, %ymm1 {%k1}
+; SKX-NEXT: vmovaps %ymm1, %ymm0
; SKX-NEXT: retq
%mask = fcmp oeq <8 x float> %x, %y
@@ -658,9 +668,9 @@ define <16 x i32> @test13(<16 x float>%a, <16 x float>%b)
define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) {
; CHECK-LABEL: test14:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm2
+; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm2
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm2, %k1
-; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%sub_r = sub <16 x i32> %a, %b
%cmp.i2.i = icmp sgt <16 x i32> %sub_r, %a
@@ -673,9 +683,9 @@ define <16 x i32> @test14(<16 x i32>%a, <16 x i32>%b) {
define <8 x i64> @test15(<8 x i64>%a, <8 x i64>%b) {
; CHECK-LABEL: test15:
; CHECK: ## BB#0:
-; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm2
+; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm2
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm2, %k1
-; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%sub_r = sub <8 x i64> %a, %b
%cmp.i2.i = icmp sgt <8 x i64> %sub_r, %a
@@ -689,7 +699,8 @@ define <16 x i32> @test16(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1) nounwind
; CHECK-LABEL: test16:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k1
-; CHECK-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm2, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp sge <16 x i32> %x, %y
%max = select <16 x i1> %mask, <16 x i32> %x1, <16 x i32> %y
@@ -700,7 +711,8 @@ define <16 x i32> @test17(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-LABEL: test17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sgt <16 x i32> %x, %y
@@ -712,7 +724,8 @@ define <16 x i32> @test18(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-LABEL: test18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp sle <16 x i32> %x, %y
@@ -724,7 +737,8 @@ define <16 x i32> @test19(<16 x i32> %x, <16 x i32> %x1, <16 x i32>* %y.ptr) nou
; CHECK-LABEL: test19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
%mask = icmp ule <16 x i32> %x, %y
@@ -737,7 +751,8 @@ define <16 x i32> @test20(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i3
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp eq <16 x i32> %x1, %y1
%mask0 = icmp eq <16 x i32> %x, %y
@@ -751,7 +766,8 @@ define <8 x i64> @test21(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleq %zmm2, %zmm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %zmm0, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
%mask0 = icmp sle <8 x i64> %x, %y
@@ -765,7 +781,8 @@ define <8 x i64> @test22(<8 x i64> %x, <8 x i64>* %y.ptr, <8 x i64> %x1, <8 x i6
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <8 x i64> %x1, %y1
%y = load <8 x i64>, <8 x i64>* %y.ptr, align 4
@@ -780,7 +797,8 @@ define <16 x i32> @test23(<16 x i32> %x, <16 x i32>* %y.ptr, <16 x i32> %x1, <16
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
%y = load <16 x i32>, <16 x i32>* %y.ptr, align 4
@@ -794,7 +812,8 @@ define <8 x i64> @test24(<8 x i64> %x, <8 x i64> %x1, i64* %yb.ptr) nounwind {
; CHECK-LABEL: test24:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k1
-; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <8 x i64> undef, i64 %yb, i32 0
@@ -808,7 +827,8 @@ define <16 x i32> @test25(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1) nounwind
; CHECK-LABEL: test25:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to16}, %zmm0, %k1
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <16 x i32> undef, i32 %yb, i32 0
@@ -823,7 +843,8 @@ define <16 x i32> @test26(<16 x i32> %x, i32* %yb.ptr, <16 x i32> %x1, <16 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to16}, %zmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -840,7 +861,8 @@ define <8 x i64> @test27(<8 x i64> %x, i64* %yb.ptr, <8 x i64> %x1, <8 x i64> %y
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleq (%rdi){1to8}, %zmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -858,8 +880,7 @@ define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1
; KNL-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; KNL-NEXT: kxnorw %k1, %k0, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: retq
;
@@ -883,8 +904,7 @@ define <16 x i8>@test29(<16 x i32> %x, <16 x i32> %y, <16 x i32> %x1, <16 x i32>
; KNL-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; KNL-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; KNL-NEXT: kxorw %k1, %k0, %k1
-; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, %xmm0
; KNL-NEXT: retq
;
@@ -912,7 +932,8 @@ define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
; SKX-LABEL: test30:
; SKX: ## BB#0:
; SKX-NEXT: vcmpeqpd %ymm1, %ymm0, %k1
-; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vmovapd %ymm0, %ymm1 {%k1}
+; SKX-NEXT: vmovapd %ymm1, %ymm0
; SKX-NEXT: retq
%mask = fcmp oeq <4 x double> %x, %y
%max = select <4 x i1> %mask, <4 x double> %x, <4 x double> %y
@@ -930,7 +951,8 @@ define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp
; SKX-LABEL: test31:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %xmm0, %k1
-; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
+; SKX-NEXT: vmovapd %xmm0, %xmm1 {%k1}
+; SKX-NEXT: vmovapd %xmm1, %xmm0
; SKX-NEXT: retq
%y = load <2 x double>, <2 x double>* %yp, align 4
%mask = fcmp olt <2 x double> %x, %y
@@ -949,7 +971,8 @@ define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp
; SKX-LABEL: test32:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi), %ymm0, %k1
-; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vmovapd %ymm0, %ymm1 {%k1}
+; SKX-NEXT: vmovapd %ymm1, %ymm0
; SKX-NEXT: retq
%y = load <4 x double>, <4 x double>* %yp, align 4
%mask = fcmp olt <4 x double> %x, %y
@@ -962,7 +985,8 @@ define <8 x double> @test33(<8 x double> %x, <8 x double> %x1, <8 x double>* %yp
; CHECK-LABEL: test33:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltpd (%rdi), %zmm0, %k1
-; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovapd %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <8 x double>, <8 x double>* %yp, align 4
%mask = fcmp olt <8 x double> %x, %y
@@ -980,7 +1004,8 @@ define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) no
; SKX-LABEL: test34:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %xmm0, %k1
-; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; SKX-NEXT: vmovaps %xmm0, %xmm1 {%k1}
+; SKX-NEXT: vmovaps %xmm1, %xmm0
; SKX-NEXT: retq
%y = load <4 x float>, <4 x float>* %yp, align 4
%mask = fcmp olt <4 x float> %x, %y
@@ -995,14 +1020,15 @@ define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) no
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vmovups (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
-; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vmovaps %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test35:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi), %ymm0, %k1
-; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vmovaps %ymm0, %ymm1 {%k1}
+; SKX-NEXT: vmovaps %ymm1, %ymm0
; SKX-NEXT: retq
%y = load <8 x float>, <8 x float>* %yp, align 4
%mask = fcmp olt <8 x float> %x, %y
@@ -1015,7 +1041,8 @@ define <16 x float> @test36(<16 x float> %x, <16 x float> %x1, <16 x float>* %yp
; CHECK-LABEL: test36:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltps (%rdi), %zmm0, %k1
-; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <16 x float>, <16 x float>* %yp, align 4
%mask = fcmp olt <16 x float> %x, %y
@@ -1027,7 +1054,8 @@ define <8 x double> @test37(<8 x double> %x, <8 x double> %x1, double* %ptr) nou
; CHECK-LABEL: test37:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1
-; CHECK-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovapd %zmm1, %zmm0
; CHECK-NEXT: retq
%a = load double, double* %ptr
@@ -1050,7 +1078,8 @@ define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nou
; SKX-LABEL: test38:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to4}, %ymm0, %k1
-; SKX-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vmovapd %ymm0, %ymm1 {%k1}
+; SKX-NEXT: vmovapd %ymm1, %ymm0
; SKX-NEXT: retq
%a = load double, double* %ptr
@@ -1073,7 +1102,8 @@ define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nou
; SKX-LABEL: test39:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltpd (%rdi){1to2}, %xmm0, %k1
-; SKX-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
+; SKX-NEXT: vmovapd %xmm0, %xmm1 {%k1}
+; SKX-NEXT: vmovapd %xmm1, %xmm0
; SKX-NEXT: retq
%a = load double, double* %ptr
@@ -1090,7 +1120,8 @@ define <16 x float> @test40(<16 x float> %x, <16 x float> %x1, float* %ptr) n
; CHECK-LABEL: test40:
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpltps (%rdi){1to16}, %zmm0, %k1
-; CHECK-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovaps %zmm1, %zmm0
; CHECK-NEXT: retq
%a = load float, float* %ptr
@@ -1109,14 +1140,15 @@ define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) noun
; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
; KNL-NEXT: vbroadcastss (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
-; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: vmovaps %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vmovaps %ymm1, %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test41:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to8}, %ymm0, %k1
-; SKX-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
+; SKX-NEXT: vmovaps %ymm0, %ymm1 {%k1}
+; SKX-NEXT: vmovaps %ymm1, %ymm0
; SKX-NEXT: retq
%a = load float, float* %ptr
@@ -1139,7 +1171,8 @@ define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) noun
; SKX-LABEL: test42:
; SKX: ## BB#0:
; SKX-NEXT: vcmpltps (%rdi){1to4}, %xmm0, %k1
-; SKX-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; SKX-NEXT: vmovaps %xmm0, %xmm1 {%k1}
+; SKX-NEXT: vmovaps %xmm1, %xmm0
; SKX-NEXT: retq
%a = load float, float* %ptr
@@ -1158,7 +1191,8 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
; KNL-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL-NEXT: vptestmq %zmm2, %zmm2, %k1
; KNL-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
-; KNL-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; KNL-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; KNL-NEXT: vmovapd %zmm1, %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test43:
@@ -1166,7 +1200,8 @@ define <8 x double> @test43(<8 x double> %x, <8 x double> %x1, double* %ptr,<8 x
; SKX-NEXT: vpsllw $15, %xmm2, %xmm2
; SKX-NEXT: vpmovw2m %xmm2, %k1
; SKX-NEXT: vcmpltpd (%rdi){1to8}, %zmm0, %k1 {%k1}
-; SKX-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; SKX-NEXT: vmovapd %zmm0, %zmm1 {%k1}
+; SKX-NEXT: vmovapd %zmm1, %zmm0
; SKX-NEXT: retq
%a = load double, double* %ptr
diff --git a/test/CodeGen/X86/avx512bw-mov.ll b/test/CodeGen/X86/avx512bw-mov.ll
index c58b3cc8c3cd7..11bb431414a00 100644
--- a/test/CodeGen/X86/avx512bw-mov.ll
+++ b/test/CodeGen/X86/avx512bw-mov.ll
@@ -26,7 +26,7 @@ define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqb %zmm2, %zmm1, %k1
-; CHECK-NEXT: vpblendmb (%rdi), %zmm0, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <64 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <64 x i8>*
@@ -74,7 +74,7 @@ define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpcmpneqw %zmm2, %zmm1, %k1
-; CHECK-NEXT: vpblendmw (%rdi), %zmm0, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <32 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i16>*
diff --git a/test/CodeGen/X86/avx512bw-vec-cmp.ll b/test/CodeGen/X86/avx512bw-vec-cmp.ll
index 016837e613072..34432468921b0 100644
--- a/test/CodeGen/X86/avx512bw-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512bw-vec-cmp.ll
@@ -5,7 +5,8 @@ define <64 x i8> @test1(<64 x i8> %x, <64 x i8> %y) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp eq <64 x i8> %x, %y
%max = select <64 x i1> %mask, <64 x i8> %x, <64 x i8> %y
@@ -16,7 +17,8 @@ define <64 x i8> @test2(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
; CHECK-LABEL: test2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpblendmb %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp sgt <64 x i8> %x, %y
%max = select <64 x i1> %mask, <64 x i8> %x1, <64 x i8> %y
@@ -27,7 +29,8 @@ define <32 x i16> @test3(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1) nounwind
; CHECK-LABEL: test3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %zmm0, %zmm1, %k1
-; CHECK-NEXT: vpblendmw %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %zmm2, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp sge <32 x i16> %x, %y
%max = select <32 x i1> %mask, <32 x i16> %x1, <32 x i16> %y
@@ -38,7 +41,8 @@ define <64 x i8> @test4(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1) nounwind {
; CHECK-LABEL: test4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleub %zmm1, %zmm0, %k1
-; CHECK-NEXT: vpblendmb %zmm2, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %zmm2, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask = icmp ugt <64 x i8> %x, %y
%max = select <64 x i1> %mask, <64 x i8> %x1, <64 x i8> %y
@@ -49,7 +53,8 @@ define <32 x i16> @test5(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %yp) nounwin
; CHECK-LABEL: test5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %yp, align 4
%mask = icmp eq <32 x i16> %x, %y
@@ -61,7 +66,8 @@ define <32 x i16> @test6(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK-LABEL: test6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sgt <32 x i16> %x, %y
@@ -73,7 +79,8 @@ define <32 x i16> @test7(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK-LABEL: test7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp sle <32 x i16> %x, %y
@@ -85,7 +92,8 @@ define <32 x i16> @test8(<32 x i16> %x, <32 x i16> %x1, <32 x i16>* %y.ptr) noun
; CHECK-LABEL: test8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleuw (%rdi), %zmm0, %k1
-; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
%mask = icmp ule <32 x i16> %x, %y
@@ -98,7 +106,8 @@ define <32 x i16> @test9(<32 x i16> %x, <32 x i16> %y, <32 x i16> %x1, <32 x i16
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpeqw %zmm3, %zmm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp eq <32 x i16> %x1, %y1
%mask0 = icmp eq <32 x i16> %x, %y
@@ -112,7 +121,8 @@ define <64 x i8> @test10(<64 x i8> %x, <64 x i8> %y, <64 x i8> %x1, <64 x i8> %y
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleb %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleb %zmm2, %zmm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmb %zmm0, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <64 x i8> %x1, %y1
%mask0 = icmp sle <64 x i8> %x, %y
@@ -126,7 +136,8 @@ define <64 x i8> @test11(<64 x i8> %x, <64 x i8>* %y.ptr, <64 x i8> %x1, <64 x i
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %zmm2, %zmm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmb %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <64 x i8> %x1, %y1
%y = load <64 x i8>, <64 x i8>* %y.ptr, align 4
@@ -141,7 +152,8 @@ define <32 x i16> @test12(<32 x i16> %x, <32 x i16>* %y.ptr, <32 x i16> %x1, <32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %zmm1, %zmm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %zmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmw %zmm0, %zmm1, %zmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <32 x i16> %x1, %y1
%y = load <32 x i16>, <32 x i16>* %y.ptr, align 4
diff --git a/test/CodeGen/X86/avx512bwvl-mov.ll b/test/CodeGen/X86/avx512bwvl-mov.ll
index 209f18ba7f9c7..3f92641a3e165 100644
--- a/test/CodeGen/X86/avx512bwvl-mov.ll
+++ b/test/CodeGen/X86/avx512bwvl-mov.ll
@@ -26,7 +26,7 @@ define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqb %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x3f,0xca,0x04]
-; CHECK-NEXT: vpblendmb (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x66,0x07]
+; CHECK-NEXT: vmovdqu8 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x29,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <32 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <32 x i8>*
@@ -74,7 +74,7 @@ define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqw %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x3f,0xca,0x04]
-; CHECK-NEXT: vpblendmw (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x66,0x07]
+; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <16 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i16>*
@@ -122,7 +122,7 @@ define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqb %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x3f,0xca,0x04]
-; CHECK-NEXT: vpblendmb (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x66,0x07]
+; CHECK-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7f,0x09,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <16 x i8> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <16 x i8>*
@@ -170,7 +170,7 @@ define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqw %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x3f,0xca,0x04]
-; CHECK-NEXT: vpblendmw (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x66,0x07]
+; CHECK-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i16> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i16>*
diff --git a/test/CodeGen/X86/avx512bwvl-vec-cmp.ll b/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
index 17e581bbb5019..3e7f0acae78b5 100644
--- a/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
@@ -5,7 +5,8 @@ define <32 x i8> @test256_1(<32 x i8> %x, <32 x i8> %y) nounwind {
; CHECK-LABEL: test256_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask = icmp eq <32 x i8> %x, %y
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %y
@@ -16,7 +17,8 @@ define <32 x i8> @test256_2(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind
; CHECK-LABEL: test256_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
; CHECK-NEXT: retq
%mask = icmp sgt <32 x i8> %x, %y
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %x1
@@ -27,7 +29,8 @@ define <16 x i16> @test256_3(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1) nounw
; CHECK-LABEL: test256_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %ymm0, %ymm1, %k1
-; CHECK-NEXT: vpblendmw %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %ymm2, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask = icmp sge <16 x i16> %x, %y
%max = select <16 x i1> %mask, <16 x i16> %x1, <16 x i16> %y
@@ -38,7 +41,8 @@ define <32 x i8> @test256_4(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1) nounwind
; CHECK-LABEL: test256_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleub %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
; CHECK-NEXT: retq
%mask = icmp ugt <32 x i8> %x, %y
%max = select <32 x i1> %mask, <32 x i8> %x, <32 x i8> %x1
@@ -49,7 +53,8 @@ define <16 x i16> @test256_5(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %yp) nou
; CHECK-LABEL: test256_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %yp, align 4
%mask = icmp eq <16 x i16> %x, %y
@@ -61,7 +66,8 @@ define <16 x i16> @test256_6(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK-LABEL: test256_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sgt <16 x i16> %x, %y
@@ -73,7 +79,8 @@ define <16 x i16> @test256_7(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK-LABEL: test256_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp sle <16 x i16> %x, %y
@@ -85,7 +92,8 @@ define <16 x i16> @test256_8(<16 x i16> %x, <16 x i16> %x1, <16 x i16>* %y.ptr)
; CHECK-LABEL: test256_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleuw (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
%mask = icmp ule <16 x i16> %x, %y
@@ -98,7 +106,8 @@ define <16 x i16> @test256_9(<16 x i16> %x, <16 x i16> %y, <16 x i16> %x1, <16 x
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpeqw %ymm3, %ymm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp eq <16 x i16> %x1, %y1
%mask0 = icmp eq <16 x i16> %x, %y
@@ -112,7 +121,8 @@ define <32 x i8> @test256_10(<32 x i8> %x, <32 x i8> %y, <32 x i8> %x1, <32 x i8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleb %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpleb %ymm2, %ymm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmb %ymm0, %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sge <32 x i8> %x1, %y1
%mask0 = icmp sle <32 x i8> %x, %y
@@ -126,7 +136,8 @@ define <32 x i8> @test256_11(<32 x i8> %x, <32 x i8>* %y.ptr, <32 x i8> %x1, <32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %ymm2, %ymm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmb %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <32 x i8> %x1, %y1
%y = load <32 x i8>, <32 x i8>* %y.ptr, align 4
@@ -141,7 +152,8 @@ define <16 x i16> @test256_12(<16 x i16> %x, <16 x i16>* %y.ptr, <16 x i16> %x1,
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmw %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i16> %x1, %y1
%y = load <16 x i16>, <16 x i16>* %y.ptr, align 4
@@ -155,7 +167,8 @@ define <16 x i8> @test128_1(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-LABEL: test128_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask = icmp eq <16 x i8> %x, %y
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %y
@@ -166,7 +179,8 @@ define <16 x i8> @test128_2(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind
; CHECK-LABEL: test128_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: retq
%mask = icmp sgt <16 x i8> %x, %y
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %x1
@@ -177,7 +191,8 @@ define <8 x i16> @test128_3(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1) nounwind
; CHECK-LABEL: test128_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %xmm0, %xmm1, %k1
-; CHECK-NEXT: vpblendmw %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %xmm2, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask = icmp sge <8 x i16> %x, %y
%max = select <8 x i1> %mask, <8 x i16> %x1, <8 x i16> %y
@@ -188,7 +203,8 @@ define <16 x i8> @test128_4(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1) nounwind
; CHECK-LABEL: test128_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleub %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: retq
%mask = icmp ugt <16 x i8> %x, %y
%max = select <16 x i1> %mask, <16 x i8> %x, <16 x i8> %x1
@@ -199,7 +215,8 @@ define <8 x i16> @test128_5(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %yp) nounwin
; CHECK-LABEL: test128_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %yp, align 4
%mask = icmp eq <8 x i16> %x, %y
@@ -211,7 +228,8 @@ define <8 x i16> @test128_6(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK-LABEL: test128_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sgt <8 x i16> %x, %y
@@ -223,7 +241,8 @@ define <8 x i16> @test128_7(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK-LABEL: test128_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp sle <8 x i16> %x, %y
@@ -235,7 +254,8 @@ define <8 x i16> @test128_8(<8 x i16> %x, <8 x i16> %x1, <8 x i16>* %y.ptr) noun
; CHECK-LABEL: test128_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleuw (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
%mask = icmp ule <8 x i16> %x, %y
@@ -248,7 +268,8 @@ define <8 x i16> @test128_9(<8 x i16> %x, <8 x i16> %y, <8 x i16> %x1, <8 x i16>
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpeqw %xmm3, %xmm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp eq <8 x i16> %x1, %y1
%mask0 = icmp eq <8 x i16> %x, %y
@@ -262,7 +283,8 @@ define <16 x i8> @test128_10(<16 x i8> %x, <16 x i8> %y, <16 x i8> %x1, <16 x i8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleb %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpleb %xmm2, %xmm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmb %xmm0, %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <16 x i8> %x1, %y1
%mask0 = icmp sle <16 x i8> %x, %y
@@ -276,7 +298,8 @@ define <16 x i8> @test128_11(<16 x i8> %x, <16 x i8>* %y.ptr, <16 x i8> %x1, <16
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %xmm2, %xmm1, %k1
; CHECK-NEXT: vpcmpgtb (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmb %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu8 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <16 x i8> %x1, %y1
%y = load <16 x i8>, <16 x i8>* %y.ptr, align 4
@@ -291,7 +314,8 @@ define <8 x i16> @test128_12(<8 x i16> %x, <8 x i16>* %y.ptr, <8 x i16> %x1, <8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmplew %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpleuw (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqu16 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i16> %x1, %y1
%y = load <8 x i16>, <8 x i16>* %y.ptr, align 4
diff --git a/test/CodeGen/X86/avx512vl-mov.ll b/test/CodeGen/X86/avx512vl-mov.ll
index e37fd76377e39..af449d6628c46 100644
--- a/test/CodeGen/X86/avx512vl-mov.ll
+++ b/test/CodeGen/X86/avx512vl-mov.ll
@@ -166,7 +166,7 @@ define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x64,0x07]
+; CHECK-NEXT: vmovdqa32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
@@ -180,7 +180,7 @@ define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x64,0x07]
+; CHECK-NEXT: vmovdqu32 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <8 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x i32>*
@@ -222,7 +222,7 @@ define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmq (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x64,0x07]
+; CHECK-NEXT: vmovdqa64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
@@ -236,7 +236,7 @@ define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmq (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x64,0x07]
+; CHECK-NEXT: vmovdqu64 (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x29,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i64>*
@@ -279,7 +279,7 @@ define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vcmpordps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x07]
; CHECK-NEXT: vcmpneqps %ymm2, %ymm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0xc2,0xca,0x04]
-; CHECK-NEXT: vblendmps (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x65,0x07]
+; CHECK-NEXT: vmovaps (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
@@ -294,7 +294,7 @@ define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vcmpordps %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf1,0x74,0x28,0xc2,0xca,0x07]
; CHECK-NEXT: vcmpneqps %ymm2, %ymm1, %k1 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0xc2,0xca,0x04]
-; CHECK-NEXT: vblendmps (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x65,0x07]
+; CHECK-NEXT: vmovups (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x29,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = fcmp one <8 x float> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <8 x float>*
@@ -338,7 +338,7 @@ define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
-; CHECK-NEXT: vblendmpd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x65,0x07]
+; CHECK-NEXT: vmovapd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
@@ -352,7 +352,7 @@ define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
-; CHECK-NEXT: vblendmpd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x65,0x07]
+; CHECK-NEXT: vmovupd (%rdi), %ymm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x double>*
@@ -554,7 +554,7 @@ define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x64,0x07]
+; CHECK-NEXT: vmovdqa32 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
@@ -568,7 +568,7 @@ define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x64,0x07]
+; CHECK-NEXT: vmovdqu32 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x i32>*
@@ -610,7 +610,7 @@ define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmq (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x64,0x07]
+; CHECK-NEXT: vmovdqa64 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
@@ -624,7 +624,7 @@ define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vpblendmq (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x64,0x07]
+; CHECK-NEXT: vmovdqu64 (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfe,0x09,0x6f,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x i64>*
@@ -666,7 +666,7 @@ define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vblendmps (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x65,0x07]
+; CHECK-NEXT: vmovaps (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
@@ -680,7 +680,7 @@ define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1)
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vblendmps (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x65,0x07]
+; CHECK-NEXT: vmovups (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7c,0x09,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <4 x i32> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <4 x float>*
@@ -722,7 +722,7 @@ define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vblendmpd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x65,0x07]
+; CHECK-NEXT: vmovapd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x28,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
@@ -736,7 +736,7 @@ define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1
; CHECK: ## BB#0:
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04]
-; CHECK-NEXT: vblendmpd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x65,0x07]
+; CHECK-NEXT: vmovupd (%rdi), %xmm0 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0x10,0x07]
; CHECK-NEXT: retq ## encoding: [0xc3]
%mask = icmp ne <2 x i64> %mask1, zeroinitializer
%vaddr = bitcast i8* %addr to <2 x double>*
diff --git a/test/CodeGen/X86/avx512vl-vec-cmp.ll b/test/CodeGen/X86/avx512vl-vec-cmp.ll
index e0acf2be653e2..25b9cc79096fe 100644
--- a/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -5,7 +5,8 @@ define <4 x i64> @test256_1(<4 x i64> %x, <4 x i64> %y) nounwind {
; CHECK-LABEL: test256_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask = icmp eq <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x, <4 x i64> %y
@@ -16,7 +17,8 @@ define <4 x i64> @test256_2(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind
; CHECK-LABEL: test256_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %ymm2, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask = icmp sgt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
@@ -27,7 +29,8 @@ define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind
; CHECK-LABEL: test256_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %ymm0, %ymm1, %k1
-; CHECK-NEXT: vpblendmd %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm2, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask = icmp sge <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x1, <8 x i32> %y
@@ -38,7 +41,8 @@ define <4 x i64> @test256_4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1) nounwind
; CHECK-LABEL: test256_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %ymm1, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm2, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %ymm2, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask = icmp ugt <4 x i64> %x, %y
%max = select <4 x i1> %mask, <4 x i64> %x1, <4 x i64> %y
@@ -49,7 +53,8 @@ define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwin
; CHECK-LABEL: test256_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
@@ -61,7 +66,8 @@ define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_5b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %y, %x
@@ -73,7 +79,8 @@ define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK-LABEL: test256_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
@@ -85,7 +92,8 @@ define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
; CHECK-LABEL: test256_6b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp slt <8 x i32> %y, %x
@@ -97,7 +105,8 @@ define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK-LABEL: test256_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
@@ -109,7 +118,8 @@ define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
; CHECK-LABEL: test256_7b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sge <8 x i32> %y, %x
@@ -121,7 +131,8 @@ define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
; CHECK-LABEL: test256_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
@@ -133,7 +144,8 @@ define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
; CHECK-LABEL: test256_8b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp uge <8 x i32> %y, %x
@@ -146,7 +158,8 @@ define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32>
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpeqd %ymm3, %ymm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp eq <8 x i32> %x1, %y1
%mask0 = icmp eq <8 x i32> %x, %y
@@ -160,7 +173,8 @@ define <4 x i64> @test256_10(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %ymm1, %ymm0, %k1
; CHECK-NEXT: vpcmpleq %ymm2, %ymm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %ymm0, %ymm2 {%k1}
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%mask0 = icmp sle <4 x i64> %x, %y
@@ -174,7 +188,8 @@ define <4 x i64> @test256_11(<4 x i64> %x, <4 x i64>* %y.ptr, <4 x i64> %x1, <4
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %ymm2, %ymm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <4 x i64> %x1, %y1
%y = load <4 x i64>, <4 x i64>* %y.ptr, align 4
@@ -189,7 +204,8 @@ define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
@@ -203,7 +219,8 @@ define <4 x i64> @test256_13(<4 x i64> %x, <4 x i64> %x1, i64* %yb.ptr) nounwind
; CHECK-LABEL: test256_13:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k1
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <4 x i64> undef, i64 %yb, i32 0
@@ -217,7 +234,8 @@ define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind
; CHECK-LABEL: test256_14:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to8}, %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
@@ -232,7 +250,8 @@ define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -249,7 +268,8 @@ define <4 x i64> @test256_16(<4 x i64> %x, i64* %yb.ptr, <4 x i64> %x1, <4 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %ymm1, %ymm2, %k1
; CHECK-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -265,7 +285,8 @@ define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %x, %y
@@ -277,7 +298,8 @@ define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %y, %x
@@ -289,7 +311,8 @@ define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %x, %y
@@ -301,7 +324,8 @@ define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
; CHECK-LABEL: test256_20:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %ymm0, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %y, %x
@@ -313,7 +337,8 @@ define <2 x i64> @test128_1(<2 x i64> %x, <2 x i64> %y) nounwind {
; CHECK-LABEL: test128_1:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask = icmp eq <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x, <2 x i64> %y
@@ -324,7 +349,8 @@ define <2 x i64> @test128_2(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind
; CHECK-LABEL: test128_2:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %xmm2, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask = icmp sgt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
@@ -335,7 +361,8 @@ define <4 x i32> @test128_3(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1) nounwind
; CHECK-LABEL: test128_3:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %xmm0, %xmm1, %k1
-; CHECK-NEXT: vpblendmd %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm2, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask = icmp sge <4 x i32> %x, %y
%max = select <4 x i1> %mask, <4 x i32> %x1, <4 x i32> %y
@@ -346,7 +373,8 @@ define <2 x i64> @test128_4(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1) nounwind
; CHECK-LABEL: test128_4:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnleuq %xmm1, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %xmm2, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask = icmp ugt <2 x i64> %x, %y
%max = select <2 x i1> %mask, <2 x i64> %x1, <2 x i64> %y
@@ -357,7 +385,8 @@ define <4 x i32> @test128_5(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwin
; CHECK-LABEL: test128_5:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %x, %y
@@ -369,7 +398,8 @@ define <4 x i32> @test128_5b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %yp) nounwi
; CHECK-LABEL: test128_5b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %yp, align 4
%mask = icmp eq <4 x i32> %y, %x
@@ -381,7 +411,8 @@ define <4 x i32> @test128_6(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK-LABEL: test128_6:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sgt <4 x i32> %x, %y
@@ -393,7 +424,8 @@ define <4 x i32> @test128_6b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_6b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp slt <4 x i32> %y, %x
@@ -405,7 +437,8 @@ define <4 x i32> @test128_7(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK-LABEL: test128_7:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sle <4 x i32> %x, %y
@@ -417,7 +450,8 @@ define <4 x i32> @test128_7b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_7b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp sge <4 x i32> %y, %x
@@ -429,7 +463,8 @@ define <4 x i32> @test128_8(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) noun
; CHECK-LABEL: test128_8:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ule <4 x i32> %x, %y
@@ -441,7 +476,8 @@ define <4 x i32> @test128_8b(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_8b:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
@@ -454,7 +490,8 @@ define <4 x i32> @test128_9(<4 x i32> %x, <4 x i32> %y, <4 x i32> %x1, <4 x i32>
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpeqd %xmm3, %xmm2, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp eq <4 x i32> %x1, %y1
%mask0 = icmp eq <4 x i32> %x, %y
@@ -468,7 +505,8 @@ define <2 x i64> @test128_10(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %xmm1, %xmm0, %k1
; CHECK-NEXT: vpcmpleq %xmm2, %xmm3, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %xmm0, %xmm2, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%mask0 = icmp sle <2 x i64> %x, %y
@@ -482,7 +520,8 @@ define <2 x i64> @test128_11(<2 x i64> %x, <2 x i64>* %y.ptr, <2 x i64> %x1, <2
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %xmm2, %xmm1, %k1
; CHECK-NEXT: vpcmpgtq (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sgt <2 x i64> %x1, %y1
%y = load <2 x i64>, <2 x i64>* %y.ptr, align 4
@@ -497,7 +536,8 @@ define <4 x i32> @test128_12(<4 x i32> %x, <4 x i32>* %y.ptr, <4 x i32> %x1, <4
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
@@ -511,7 +551,8 @@ define <2 x i64> @test128_13(<2 x i64> %x, <2 x i64> %x1, i64* %yb.ptr) nounwind
; CHECK-LABEL: test128_13:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k1
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%yb = load i64, i64* %yb.ptr, align 4
%y.0 = insertelement <2 x i64> undef, i64 %yb, i32 0
@@ -525,7 +566,8 @@ define <4 x i32> @test128_14(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1) nounwind
; CHECK-LABEL: test128_14:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled (%rdi){1to4}, %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <4 x i32> undef, i32 %yb, i32 0
@@ -540,7 +582,8 @@ define <4 x i32> @test128_15(<4 x i32> %x, i32* %yb.ptr, <4 x i32> %x1, <4 x i32
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpled %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <4 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -557,7 +600,8 @@ define <2 x i64> @test128_16(<2 x i64> %x, i64* %yb.ptr, <2 x i64> %x1, <2 x i64
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleq %xmm1, %xmm2, %k1
; CHECK-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k1 {%k1}
-; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%mask1 = icmp sge <2 x i64> %x1, %y1
%yb = load i64, i64* %yb.ptr, align 4
@@ -573,7 +617,8 @@ define <4 x i32> @test128_17(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_17:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %x, %y
@@ -585,7 +630,8 @@ define <4 x i32> @test128_18(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_18:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpneqd (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp ne <4 x i32> %y, %x
@@ -597,7 +643,8 @@ define <4 x i32> @test128_19(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_19:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpnltud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %x, %y
@@ -609,7 +656,8 @@ define <4 x i32> @test128_20(<4 x i32> %x, <4 x i32> %x1, <4 x i32>* %y.ptr) nou
; CHECK-LABEL: test128_20:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpleud (%rdi), %xmm0, %k1
-; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%y = load <4 x i32>, <4 x i32>* %y.ptr, align 4
%mask = icmp uge <4 x i32> %y, %x
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 8e9bc8b5af4b9..0060539c691fd 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -157,16 +157,12 @@ define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
ret i8 %d
}
-; FIXME: The 'not' is redundant.
-
define i32 @smin(i32 %x) {
; CHECK-LABEL: smin:
; CHECK: ## BB#0:
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: notl %ecx
; CHECK-NEXT: xorl $-1, %edi
; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: cmovsl %ecx, %eax
+; CHECK-NEXT: cmovsl %edi, %eax
; CHECK-NEXT: retq
%not_x = xor i32 %x, -1
%1 = icmp slt i32 %not_x, -1
diff --git a/test/CodeGen/X86/fma-fneg-combine.ll b/test/CodeGen/X86/fma-fneg-combine.ll
index 5636a5bcd73ea..5329f5b216a41 100644
--- a/test/CodeGen/X86/fma-fneg-combine.ll
+++ b/test/CodeGen/X86/fma-fneg-combine.ll
@@ -222,9 +222,9 @@ define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
; SKX-NEXT: kmovw %edi, %k1
; SKX-NEXT: vxorps {{.*}}(%rip){1to16}, %zmm0, %zmm3
; SKX-NEXT: vfnmadd213ps {ru-sae}, %zmm2, %zmm0, %zmm1
-; SKX-NEXT: vblendmps %zmm1, %zmm3, %zmm1 {%k1}
-; SKX-NEXT: vfnmadd132ps {rd-sae}, %zmm0, %zmm2, %zmm1 {%k1}
-; SKX-NEXT: vmovaps %zmm1, %zmm0
+; SKX-NEXT: vmovaps %zmm1, %zmm3 {%k1}
+; SKX-NEXT: vfnmadd132ps {rd-sae}, %zmm0, %zmm2, %zmm3 {%k1}
+; SKX-NEXT: vmovaps %zmm3, %zmm0
; SKX-NEXT: retq
;
; KNL-LABEL: test15:
@@ -232,9 +232,9 @@ define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm3
; KNL-NEXT: vfnmadd213ps {ru-sae}, %zmm2, %zmm0, %zmm1
-; KNL-NEXT: vblendmps %zmm1, %zmm3, %zmm1 {%k1}
-; KNL-NEXT: vfnmadd132ps {rd-sae}, %zmm0, %zmm2, %zmm1 {%k1}
-; KNL-NEXT: vmovaps %zmm1, %zmm0
+; KNL-NEXT: vmovaps %zmm1, %zmm3 {%k1}
+; KNL-NEXT: vfnmadd132ps {rd-sae}, %zmm0, %zmm2, %zmm3 {%k1}
+; KNL-NEXT: vmovaps %zmm3, %zmm0
; KNL-NEXT: retq
entry:
%sub.i = fsub <16 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %a
diff --git a/test/CodeGen/X86/fmaddsub-combine.ll b/test/CodeGen/X86/fmaddsub-combine.ll
new file mode 100644
index 0000000000000..f3b13cd053b48
--- /dev/null
+++ b/test/CodeGen/X86/fmaddsub-combine.ll
@@ -0,0 +1,129 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_256 %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma,+avx512f | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_512 %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma4 | FileCheck -check-prefix=FMA4 %s
+
+; This test checks the fusing of MUL + ADDSUB into FMADDSUB.
+
+define <2 x double> @mul_addsub_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
+; FMA3-LABEL: mul_addsub_pd128:
+; FMA3: # BB#0: # %entry
+; FMA3-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: mul_addsub_pd128:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vfmaddsubpd %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <2 x double> %A, %B
+ %Sub = fsub <2 x double> %AB, %C
+ %Add = fadd <2 x double> %AB, %C
+ %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %Addsub
+}
+
+define <4 x float> @mul_addsub_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
+; FMA3-LABEL: mul_addsub_ps128:
+; FMA3: # BB#0: # %entry
+; FMA3-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: mul_addsub_ps128:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vfmaddsubps %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <4 x float> %A, %B
+ %Sub = fsub <4 x float> %AB, %C
+ %Add = fadd <4 x float> %AB, %C
+ %Addsub = shufflevector <4 x float> %Sub, <4 x float> %Add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %Addsub
+}
+
+define <4 x double> @mul_addsub_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
+; FMA3-LABEL: mul_addsub_pd256:
+; FMA3: # BB#0: # %entry
+; FMA3-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: mul_addsub_pd256:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vfmaddsubpd %ymm2, %ymm1, %ymm0, %ymm0
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <4 x double> %A, %B
+ %Sub = fsub <4 x double> %AB, %C
+ %Add = fadd <4 x double> %AB, %C
+ %Addsub = shufflevector <4 x double> %Sub, <4 x double> %Add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %Addsub
+}
+
+define <8 x float> @mul_addsub_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
+; FMA3-LABEL: mul_addsub_ps256:
+; FMA3: # BB#0: # %entry
+; FMA3-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: mul_addsub_ps256:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vfmaddsubps %ymm2, %ymm1, %ymm0, %ymm0
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <8 x float> %A, %B
+ %Sub = fsub <8 x float> %AB, %C
+ %Add = fadd <8 x float> %AB, %C
+ %Addsub = shufflevector <8 x float> %Sub, <8 x float> %Add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %Addsub
+}
+
+define <8 x double> @mul_addsub_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
+; FMA3_256-LABEL: mul_addsub_pd512:
+; FMA3_256: # BB#0: # %entry
+; FMA3_256-NEXT: vfmaddsub213pd %ymm4, %ymm2, %ymm0
+; FMA3_256-NEXT: vfmaddsub213pd %ymm5, %ymm3, %ymm1
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: mul_addsub_pd512:
+; FMA3_512: # BB#0: # %entry
+; FMA3_512-NEXT: vfmaddsub213pd %zmm2, %zmm1, %zmm0
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: mul_addsub_pd512:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vfmaddsubpd %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vfmaddsubpd %ymm5, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <8 x double> %A, %B
+ %Sub = fsub <8 x double> %AB, %C
+ %Add = fadd <8 x double> %AB, %C
+ %Addsub = shufflevector <8 x double> %Sub, <8 x double> %Add, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x double> %Addsub
+}
+
+define <16 x float> @mul_addsub_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
+; FMA3_256-LABEL: mul_addsub_ps512:
+; FMA3_256: # BB#0: # %entry
+; FMA3_256-NEXT: vfmaddsub213ps %ymm4, %ymm2, %ymm0
+; FMA3_256-NEXT: vfmaddsub213ps %ymm5, %ymm3, %ymm1
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: mul_addsub_ps512:
+; FMA3_512: # BB#0: # %entry
+; FMA3_512-NEXT: vfmaddsub213ps %zmm2, %zmm1, %zmm0
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: mul_addsub_ps512:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vfmaddsubps %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vfmaddsubps %ymm5, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <16 x float> %A, %B
+ %Sub = fsub <16 x float> %AB, %C
+ %Add = fadd <16 x float> %AB, %C
+ %Addsub = shufflevector <16 x float> %Sub, <16 x float> %Add, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ ret <16 x float> %Addsub
+}
+
+attributes #0 = { nounwind "unsafe-fp-math"="true" }
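[Editor's note, not part of the commit: the new fmaddsub-combine.ll test above exercises a combine that matches an fmul whose result feeds both an fadd and an fsub, recombined by a shufflevector that takes the fsub result in the even lanes and the fadd result in the odd lanes -- which is exactly the lane behaviour of the vfmaddsub instructions being checked. A minimal scalar model in C is sketched below; the function name and signature are hypothetical and only illustrate the lane semantics, not the actual LLVM combine code.]

#include <stddef.h>

/* Scalar model of the FMADDSUB pattern the test targets: every lane
   shares one multiply a[i]*b[i]; even lanes subtract c[i], odd lanes
   add c[i].  This mirrors shuffle masks such as <0, 5, 2, 7> selecting
   the fsub result for even indices and the fadd result for odd ones. */
static void fmaddsub_model(const double *a, const double *b,
                           const double *c, double *r, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        double ab = a[i] * b[i];            /* the shared fmul        */
        r[i] = (i % 2 == 0) ? ab - c[i]     /* even lane: subtract    */
                            : ab + c[i];    /* odd lane: add          */
    }
}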
diff --git a/test/CodeGen/X86/sse-fsignum.ll b/test/CodeGen/X86/sse-fsignum.ll
index 7159d4c871740..32594a27698d2 100644
--- a/test/CodeGen/X86/sse-fsignum.ll
+++ b/test/CodeGen/X86/sse-fsignum.ll
@@ -93,15 +93,14 @@ define void @signum32b(<8 x float>*) {
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
; AVX512F-NEXT: vcmpltps %zmm1, %zmm0, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
-; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm3 {%k1} {z}
-; AVX512F-NEXT: vpmovqd %zmm3, %ymm3
-; AVX512F-NEXT: vcvtdq2ps %ymm3, %ymm3
+; AVX512F-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
+; AVX512F-NEXT: vpmovqd %zmm2, %ymm2
+; AVX512F-NEXT: vcvtdq2ps %ymm2, %ymm2
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
-; AVX512F-NEXT: vmovdqa64 %zmm2, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512F-NEXT: vsubps %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT: vsubps %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vmovaps %ymm0, (%rdi)
; AVX512F-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/vector-compare-results.ll b/test/CodeGen/X86/vector-compare-results.ll
index abe3da752874d..c34f333ef785b 100644
--- a/test/CodeGen/X86/vector-compare-results.ll
+++ b/test/CodeGen/X86/vector-compare-results.ll
@@ -4,6 +4,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512dq | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512DQ
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
;
; 128-bit vector comparisons
@@ -308,12 +310,26 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v16i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v16i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v16i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i16> %a0, %a1
ret <16 x i1> %1
}
@@ -589,13 +605,26 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v8f64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vcmpltpd %zmm0, %zmm1, %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: vpmovqw %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v8f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v8f64:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v8f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512BW-NEXT: retq
%1 = fcmp ogt <8 x double> %a0, %a1
ret <8 x i1> %1
}
@@ -636,13 +665,26 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v16f32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vcmpltps %zmm0, %zmm1, %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v16f32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v16f32:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v16f32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x float> %a0, %a1
ret <16 x i1> %1
}
@@ -734,13 +776,26 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v8i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: vpmovqw %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v8i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v8i64:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
+; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v8i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <8 x i64> %a0, %a1
ret <8 x i1> %1
}
@@ -784,13 +839,26 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v16i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v16i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v16i32:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
+; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v16i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i32> %a0, %a1
ret <16 x i1> %1
}
@@ -1045,16 +1113,35 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v32i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v32i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i16> %a0, %a1
ret <32 x i1> %1
}
@@ -1874,15 +1961,31 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v64i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
-; AVX512-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512-NEXT: vmovdqa %xmm4, %xmm2
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vextracti128 $1, %ymm4, %xmm3
+; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: vmovdqa %xmm4, %xmm2
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v64i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm4
+; AVX512DQ-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm3
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: vmovdqa %xmm4, %xmm2
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <64 x i8> %a0, %a1
ret <64 x i1> %1
}
@@ -1957,120 +2060,350 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v16f64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vextractf32x4 $3, %zmm2, %xmm4
-; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm5
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movq $-1, %rcx
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
-; AVX512-NEXT: vextractf32x4 $2, %zmm2, %xmm5
-; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm6
-; AVX512-NEXT: vucomisd %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm7
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
-; AVX512-NEXT: vucomisd %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
-; AVX512-NEXT: vextractf32x4 $1, %zmm2, %xmm5
-; AVX512-NEXT: vextractf32x4 $1, %zmm0, %xmm6
-; AVX512-NEXT: vucomisd %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm7
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
-; AVX512-NEXT: vucomisd %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
-; AVX512-NEXT: vucomisd %xmm2, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vucomisd %xmm2, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm6[0],xmm0[0]
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vextractf32x4 $3, %zmm3, %xmm2
-; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm4
-; AVX512-NEXT: vucomisd %xmm2, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vucomisd %xmm2, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
-; AVX512-NEXT: vextractf32x4 $2, %zmm3, %xmm4
-; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm5
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX512-NEXT: vextractf32x4 $1, %zmm3, %xmm4
-; AVX512-NEXT: vextractf32x4 $1, %zmm1, %xmm5
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
-; AVX512-NEXT: vucomisd %xmm3, %xmm1
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512-NEXT: vucomisd %xmm3, %xmm1
-; AVX512-NEXT: cmovaq %rcx, %rax
-; AVX512-NEXT: vmovq %rax, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v16f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf32x4 $3, %zmm2, %xmm4
+; AVX512F-NEXT: vextractf32x4 $3, %zmm0, %xmm5
+; AVX512F-NEXT: xorl %eax, %eax
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movq $-1, %rcx
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm2, %xmm5
+; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm6
+; AVX512F-NEXT: vucomisd %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm7
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
+; AVX512F-NEXT: vucomisd %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512F-NEXT: vextractf32x4 $1, %zmm2, %xmm5
+; AVX512F-NEXT: vextractf32x4 $1, %zmm0, %xmm6
+; AVX512F-NEXT: vucomisd %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm7
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
+; AVX512F-NEXT: vucomisd %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX512F-NEXT: vucomisd %xmm2, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vucomisd %xmm2, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm6[0],xmm0[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vextractf32x4 $3, %zmm3, %xmm2
+; AVX512F-NEXT: vextractf32x4 $3, %zmm1, %xmm4
+; AVX512F-NEXT: vucomisd %xmm2, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vucomisd %xmm2, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm2
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm3, %xmm4
+; AVX512F-NEXT: vextractf32x4 $2, %zmm1, %xmm5
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512F-NEXT: vextractf32x4 $1, %zmm3, %xmm4
+; AVX512F-NEXT: vextractf32x4 $1, %zmm1, %xmm5
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512F-NEXT: vucomisd %xmm3, %xmm1
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512F-NEXT: vucomisd %xmm3, %xmm1
+; AVX512F-NEXT: cmovaq %rcx, %rax
+; AVX512F-NEXT: vmovq %rax, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v16f64:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm2, %xmm4
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm0, %xmm5
+; AVX512DQ-NEXT: xorl %eax, %eax
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movq $-1, %rcx
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm2, %xmm5
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm0, %xmm6
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm7
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm2, %xmm5
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm0, %xmm6
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm7
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm6[0],xmm0[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm3, %xmm2
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm1, %xmm4
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm2
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm3, %xmm4
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm1, %xmm5
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm3, %xmm4
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm1, %xmm5
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512DQ-NEXT: vucomisd %xmm3, %xmm1
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm3, %xmm1
+; AVX512DQ-NEXT: cmovaq %rcx, %rax
+; AVX512DQ-NEXT: vmovq %rax, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v16f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm2, %xmm4
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm5
+; AVX512BW-NEXT: xorl %eax, %eax
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movq $-1, %rcx
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm2, %xmm5
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm6
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm7
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm2, %xmm5
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm0, %xmm6
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm7
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm6[1,0]
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm5[0]
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm6[0],xmm0[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm3, %xmm2
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm1, %xmm4
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm2
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm3, %xmm4
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm1, %xmm5
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm3, %xmm4
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm1, %xmm5
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm6[0],xmm4[0]
+; AVX512BW-NEXT: vucomisd %xmm3, %xmm1
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512BW-NEXT: vucomisd %xmm3, %xmm1
+; AVX512BW-NEXT: cmovaq %rcx, %rax
+; AVX512BW-NEXT: vmovq %rax, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x double> %a0, %a1
ret <16 x i1> %1
}
@@ -2416,207 +2749,612 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v32f32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vextractf32x4 $3, %zmm2, %xmm4
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
-; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm6
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: vucomiss %xmm5, %xmm7
-; AVX512-NEXT: movl $-1, %ecx
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm4, %xmm6
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm5
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm8
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
-; AVX512-NEXT: vucomiss %xmm7, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm8, %xmm5
-; AVX512-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm4, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm8
-; AVX512-NEXT: vextractf32x4 $2, %zmm2, %xmm5
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm7
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
-; AVX512-NEXT: vucomiss %xmm6, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm5, %xmm7
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
-; AVX512-NEXT: vucomiss %xmm6, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
-; AVX512-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm4, %ymm8
-; AVX512-NEXT: vextractf32x4 $1, %zmm2, %xmm5
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; AVX512-NEXT: vextractf32x4 $1, %zmm0, %xmm7
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
-; AVX512-NEXT: vucomiss %xmm6, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm5, %xmm7
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
-; AVX512-NEXT: vucomiss %xmm6, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
-; AVX512-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
-; AVX512-NEXT: vucomiss %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm2, %xmm0
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm5
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
-; AVX512-NEXT: vucomiss %xmm6, %xmm7
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm2, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm8
-; AVX512-NEXT: vextractf32x4 $3, %zmm3, %xmm2
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm5
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; AVX512-NEXT: vucomiss %xmm4, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm2, %xmm5
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
-; AVX512-NEXT: vucomiss %xmm6, %xmm7
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm2, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
-; AVX512-NEXT: vextractf32x4 $2, %zmm3, %xmm4
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
-; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm6
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
-; AVX512-NEXT: vucomiss %xmm5, %xmm7
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm4, %xmm6
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm5
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm6[1,0]
-; AVX512-NEXT: vucomiss %xmm7, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm0
-; AVX512-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm5 = xmm6[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512-NEXT: vextractf32x4 $1, %zmm3, %xmm0
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; AVX512-NEXT: vextractf32x4 $1, %zmm1, %xmm5
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
-; AVX512-NEXT: vucomiss %xmm4, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm0, %xmm5
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
-; AVX512-NEXT: vucomiss %xmm6, %xmm7
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm0, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm4, %xmm0
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
-; AVX512-NEXT: vucomiss %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vucomiss %xmm3, %xmm1
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmoval %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm4
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm3[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
-; AVX512-NEXT: vucomiss %xmm5, %xmm6
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmoval %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
-; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; AVX512-NEXT: vucomiss %xmm3, %xmm1
-; AVX512-NEXT: cmoval %ecx, %eax
-; AVX512-NEXT: vpinsrd $3, %eax, %xmm4, %xmm1
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v32f32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf32x4 $3, %zmm2, %xmm4
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX512F-NEXT: vextractf32x4 $3, %zmm0, %xmm6
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX512F-NEXT: xorl %eax, %eax
+; AVX512F-NEXT: vucomiss %xmm5, %xmm7
+; AVX512F-NEXT: movl $-1, %ecx
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm4, %xmm6
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm5
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm5, %xmm8
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
+; AVX512F-NEXT: vucomiss %xmm7, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm8, %xmm5
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm4, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm5, %xmm8
+; AVX512F-NEXT: vextractf32x4 $2, %zmm2, %xmm5
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm7
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
+; AVX512F-NEXT: vucomiss %xmm6, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm5, %xmm7
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm4
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
+; AVX512F-NEXT: vucomiss %xmm6, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vinserti128 $1, %xmm8, %ymm4, %ymm8
+; AVX512F-NEXT: vextractf32x4 $1, %zmm2, %xmm5
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512F-NEXT: vextractf32x4 $1, %zmm0, %xmm7
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
+; AVX512F-NEXT: vucomiss %xmm6, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm5, %xmm7
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm4
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
+; AVX512F-NEXT: vucomiss %xmm6, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; AVX512F-NEXT: vucomiss %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm2, %xmm0
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm5
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
+; AVX512F-NEXT: vucomiss %xmm6, %xmm7
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm2, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm8
+; AVX512F-NEXT: vextractf32x4 $3, %zmm3, %xmm2
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; AVX512F-NEXT: vextractf32x4 $3, %zmm1, %xmm5
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512F-NEXT: vucomiss %xmm4, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm2, %xmm5
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm4
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
+; AVX512F-NEXT: vucomiss %xmm6, %xmm7
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm2, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
+; AVX512F-NEXT: vextractf32x4 $2, %zmm3, %xmm4
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm1, %xmm6
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX512F-NEXT: vucomiss %xmm5, %xmm7
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm4, %xmm6
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm5
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm6[1,0]
+; AVX512F-NEXT: vucomiss %xmm7, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm5, %xmm0
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm5 = xmm6[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512F-NEXT: vextractf32x4 $1, %zmm3, %xmm0
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX512F-NEXT: vextractf32x4 $1, %zmm1, %xmm5
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512F-NEXT: vucomiss %xmm4, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm0, %xmm5
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm4
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
+; AVX512F-NEXT: vucomiss %xmm6, %xmm7
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm0, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm4, %xmm0
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512F-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; AVX512F-NEXT: vucomiss %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vucomiss %xmm3, %xmm1
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmoval %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm4
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm3[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
+; AVX512F-NEXT: vucomiss %xmm5, %xmm6
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmoval %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX512F-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512F-NEXT: vucomiss %xmm3, %xmm1
+; AVX512F-NEXT: cmoval %ecx, %eax
+; AVX512F-NEXT: vpinsrd $3, %eax, %xmm4, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v32f32:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vextractf32x4 $3, %zmm2, %xmm4
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX512DQ-NEXT: vextractf32x4 $3, %zmm0, %xmm6
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX512DQ-NEXT: xorl %eax, %eax
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm7
+; AVX512DQ-NEXT: movl $-1, %ecx
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm4, %xmm6
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm5
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm5, %xmm8
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm7, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm8, %xmm5
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm4, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm5, %xmm8
+; AVX512DQ-NEXT: vextractf32x4 $2, %zmm2, %xmm5
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512DQ-NEXT: vextractf32x4 $2, %zmm0, %xmm7
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
+; AVX512DQ-NEXT: vucomiss %xmm6, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm7
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm4
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm6, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm4, %ymm8
+; AVX512DQ-NEXT: vextractf32x4 $1, %zmm2, %xmm5
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512DQ-NEXT: vextractf32x4 $1, %zmm0, %xmm7
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
+; AVX512DQ-NEXT: vucomiss %xmm6, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm7
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm4
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm6, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm2, %xmm0
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm5
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm6, %xmm7
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm2, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm8, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm8
+; AVX512DQ-NEXT: vextractf32x4 $3, %zmm3, %xmm2
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; AVX512DQ-NEXT: vextractf32x4 $3, %zmm1, %xmm5
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512DQ-NEXT: vucomiss %xmm4, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm2, %xmm5
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm4
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm6, %xmm7
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm2, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
+; AVX512DQ-NEXT: vextractf32x4 $2, %zmm3, %xmm4
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX512DQ-NEXT: vextractf32x4 $2, %zmm1, %xmm6
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm7
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm4, %xmm6
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm5
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm0 = xmm6[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm7, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm5, %xmm0
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm5 = xmm6[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vextractf32x4 $1, %zmm3, %xmm0
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX512DQ-NEXT: vextractf32x4 $1, %zmm1, %xmm5
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512DQ-NEXT: vucomiss %xmm4, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm0, %xmm5
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm4
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm6, %xmm7
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm0, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm4, %xmm0
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512DQ-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; AVX512DQ-NEXT: vucomiss %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vucomiss %xmm3, %xmm1
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmoval %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm4
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm3[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
+; AVX512DQ-NEXT: vucomiss %xmm5, %xmm6
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmoval %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX512DQ-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512DQ-NEXT: vucomiss %xmm3, %xmm1
+; AVX512DQ-NEXT: cmoval %ecx, %eax
+; AVX512DQ-NEXT: vpinsrd $3, %eax, %xmm4, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v32f32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm2, %xmm4
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm6
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX512BW-NEXT: xorl %eax, %eax
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm7
+; AVX512BW-NEXT: movl $-1, %ecx
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm4, %xmm6
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm5
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm5, %xmm8
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
+; AVX512BW-NEXT: vucomiss %xmm7, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm8, %xmm5
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm4, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm5, %xmm8
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm2, %xmm5
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm7
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
+; AVX512BW-NEXT: vucomiss %xmm6, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm7
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm4
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
+; AVX512BW-NEXT: vucomiss %xmm6, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm4, %ymm8
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm2, %xmm5
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm0, %xmm7
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm7[1,1,3,3]
+; AVX512BW-NEXT: vucomiss %xmm6, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm7
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm4
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm4, %xmm9
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm5[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm7[1,0]
+; AVX512BW-NEXT: vucomiss %xmm6, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm9, %xmm4
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm6 = xmm7[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm2, %xmm0
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm5
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm7 = xmm0[1,0]
+; AVX512BW-NEXT: vucomiss %xmm6, %xmm7
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm2, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm5, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm8
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm3, %xmm2
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm1, %xmm5
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512BW-NEXT: vucomiss %xmm4, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm2, %xmm5
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm4
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm2[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
+; AVX512BW-NEXT: vucomiss %xmm6, %xmm7
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm2, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm4, %xmm2
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm3, %xmm4
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm1, %xmm6
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm7 = xmm6[1,1,3,3]
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm7
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm4, %xmm6
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm5
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm6[1,0]
+; AVX512BW-NEXT: vucomiss %xmm7, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm5, %xmm0
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm5 = xmm6[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm3, %xmm0
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm1, %xmm5
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm6 = xmm5[1,1,3,3]
+; AVX512BW-NEXT: vucomiss %xmm4, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm0, %xmm5
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm4
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm7 = xmm5[1,0]
+; AVX512BW-NEXT: vucomiss %xmm6, %xmm7
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm0, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm4, %xmm0
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; AVX512BW-NEXT: vucomiss %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vucomiss %xmm3, %xmm1
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmoval %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm4
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm3[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm6 = xmm1[1,0]
+; AVX512BW-NEXT: vucomiss %xmm5, %xmm6
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmoval %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; AVX512BW-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512BW-NEXT: vucomiss %xmm3, %xmm1
+; AVX512BW-NEXT: cmoval %ecx, %eax
+; AVX512BW-NEXT: vpinsrd $3, %eax, %xmm4, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x float> %a0, %a1
ret <32 x i1> %1
}
@@ -2785,136 +3523,398 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v16i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vextracti32x4 $3, %zmm2, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rcx
-; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm5
-; AVX512-NEXT: vpextrq $1, %xmm5, %rdx
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: cmpq %rcx, %rdx
-; AVX512-NEXT: movq $-1, %rcx
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: vmovq %xmm5, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; AVX512-NEXT: vextracti32x4 $2, %zmm2, %xmm5
-; AVX512-NEXT: vpextrq $1, %xmm5, %rdx
-; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm6
-; AVX512-NEXT: vpextrq $1, %xmm6, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm7
-; AVX512-NEXT: vmovq %xmm5, %rdx
-; AVX512-NEXT: vmovq %xmm6, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
-; AVX512-NEXT: vextracti32x4 $1, %zmm2, %xmm5
-; AVX512-NEXT: vpextrq $1, %xmm5, %rdx
-; AVX512-NEXT: vextracti32x4 $1, %zmm0, %xmm6
-; AVX512-NEXT: vpextrq $1, %xmm6, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm7
-; AVX512-NEXT: vmovq %xmm5, %rdx
-; AVX512-NEXT: vmovq %xmm6, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
-; AVX512-NEXT: vpextrq $1, %xmm2, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vmovq %xmm2, %rdx
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vextracti32x4 $3, %zmm3, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %rdx
-; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm2, %rdx
-; AVX512-NEXT: vmovq %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; AVX512-NEXT: vextracti32x4 $2, %zmm3, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm5
-; AVX512-NEXT: vpextrq $1, %xmm5, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: vmovq %xmm5, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX512-NEXT: vextracti32x4 $1, %zmm3, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: vextracti32x4 $1, %zmm1, %xmm5
-; AVX512-NEXT: vpextrq $1, %xmm5, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm6
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: vmovq %xmm5, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
-; AVX512-NEXT: vpextrq $1, %xmm3, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm1, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm3, %rdx
-; AVX512-NEXT: vmovq %xmm1, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: cmovgq %rcx, %rax
-; AVX512-NEXT: vmovq %rax, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v16i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rcx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm0, %xmm5
+; AVX512F-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512F-NEXT: xorl %eax, %eax
+; AVX512F-NEXT: cmpq %rcx, %rdx
+; AVX512F-NEXT: movq $-1, %rcx
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vmovq %xmm4, %rdx
+; AVX512F-NEXT: vmovq %xmm5, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512F-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512F-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm6
+; AVX512F-NEXT: vpextrq $1, %xmm6, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm7
+; AVX512F-NEXT: vmovq %xmm5, %rdx
+; AVX512F-NEXT: vmovq %xmm6, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512F-NEXT: vextracti32x4 $1, %zmm2, %xmm5
+; AVX512F-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; AVX512F-NEXT: vpextrq $1, %xmm6, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm7
+; AVX512F-NEXT: vmovq %xmm5, %rdx
+; AVX512F-NEXT: vmovq %xmm6, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vmovq %xmm2, %rdx
+; AVX512F-NEXT: vmovq %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vmovq %xmm2, %rdx
+; AVX512F-NEXT: vmovq %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm2
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX512F-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; AVX512F-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vmovq %xmm4, %rdx
+; AVX512F-NEXT: vmovq %xmm5, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512F-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm1, %xmm5
+; AVX512F-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm6
+; AVX512F-NEXT: vmovq %xmm4, %rdx
+; AVX512F-NEXT: vmovq %xmm5, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512F-NEXT: vpextrq $1, %xmm3, %rdx
+; AVX512F-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vmovq %xmm3, %rdx
+; AVX512F-NEXT: vmovq %xmm1, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: cmovgq %rcx, %rax
+; AVX512F-NEXT: vmovq %rax, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v16i64:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm2, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rcx
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm0, %xmm5
+; AVX512DQ-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512DQ-NEXT: xorl %eax, %eax
+; AVX512DQ-NEXT: cmpq %rcx, %rdx
+; AVX512DQ-NEXT: movq $-1, %rcx
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vmovq %xmm4, %rdx
+; AVX512DQ-NEXT: vmovq %xmm5, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm2, %xmm5
+; AVX512DQ-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm0, %xmm6
+; AVX512DQ-NEXT: vpextrq $1, %xmm6, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm7
+; AVX512DQ-NEXT: vmovq %xmm5, %rdx
+; AVX512DQ-NEXT: vmovq %xmm6, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm2, %xmm5
+; AVX512DQ-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm0, %xmm6
+; AVX512DQ-NEXT: vpextrq $1, %xmm6, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm7
+; AVX512DQ-NEXT: vmovq %xmm5, %rdx
+; AVX512DQ-NEXT: vmovq %xmm6, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
+; AVX512DQ-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vmovq %xmm2, %rdx
+; AVX512DQ-NEXT: vmovq %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm3, %xmm2
+; AVX512DQ-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm1, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vmovq %xmm2, %rdx
+; AVX512DQ-NEXT: vmovq %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm2
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm3, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm1, %xmm5
+; AVX512DQ-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vmovq %xmm4, %rdx
+; AVX512DQ-NEXT: vmovq %xmm5, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm3, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm1, %xmm5
+; AVX512DQ-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm6
+; AVX512DQ-NEXT: vmovq %xmm4, %rdx
+; AVX512DQ-NEXT: vmovq %xmm5, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512DQ-NEXT: vpextrq $1, %xmm3, %rdx
+; AVX512DQ-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vmovq %xmm3, %rdx
+; AVX512DQ-NEXT: vmovq %xmm1, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: cmovgq %rcx, %rax
+; AVX512DQ-NEXT: vmovq %rax, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v16i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rcx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm5
+; AVX512BW-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512BW-NEXT: xorl %eax, %eax
+; AVX512BW-NEXT: cmpq %rcx, %rdx
+; AVX512BW-NEXT: movq $-1, %rcx
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vmovq %xmm4, %rdx
+; AVX512BW-NEXT: vmovq %xmm5, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512BW-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm6
+; AVX512BW-NEXT: vpextrq $1, %xmm6, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm7
+; AVX512BW-NEXT: vmovq %xmm5, %rdx
+; AVX512BW-NEXT: vmovq %xmm6, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm2, %xmm5
+; AVX512BW-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; AVX512BW-NEXT: vpextrq $1, %xmm6, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm7
+; AVX512BW-NEXT: vmovq %xmm5, %rdx
+; AVX512BW-NEXT: vmovq %xmm6, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm7[0]
+; AVX512BW-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vmovq %xmm2, %rdx
+; AVX512BW-NEXT: vmovq %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512BW-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vmovq %xmm2, %rdx
+; AVX512BW-NEXT: vmovq %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm2
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; AVX512BW-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vmovq %xmm4, %rdx
+; AVX512BW-NEXT: vmovq %xmm5, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm1, %xmm5
+; AVX512BW-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm6
+; AVX512BW-NEXT: vmovq %xmm4, %rdx
+; AVX512BW-NEXT: vmovq %xmm5, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
+; AVX512BW-NEXT: vpextrq $1, %xmm3, %rdx
+; AVX512BW-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vmovq %xmm3, %rdx
+; AVX512BW-NEXT: vmovq %xmm1, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: cmovgq %rcx, %rax
+; AVX512BW-NEXT: vmovq %rax, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i64> %a0, %a1
ret <16 x i1> %1
}
@@ -3252,223 +4252,660 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v32i32:
-; AVX512: # BB#0:
-; AVX512-NEXT: vextracti32x4 $3, %zmm2, %xmm4
-; AVX512-NEXT: vpextrd $1, %xmm4, %ecx
-; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm5
-; AVX512-NEXT: vpextrd $1, %xmm5, %edx
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: cmpl %ecx, %edx
-; AVX512-NEXT: movl $-1, %ecx
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm4, %esi
-; AVX512-NEXT: vmovd %xmm5, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm6
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $2, %xmm4, %edx
-; AVX512-NEXT: vpextrd $2, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $3, %xmm4, %edx
-; AVX512-NEXT: vpextrd $3, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
-; AVX512-NEXT: vextracti32x4 $2, %zmm2, %xmm5
-; AVX512-NEXT: vpextrd $1, %xmm5, %edx
-; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm6
-; AVX512-NEXT: vpextrd $1, %xmm6, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm5, %esi
-; AVX512-NEXT: vmovd %xmm6, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm7
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
-; AVX512-NEXT: vpextrd $2, %xmm5, %edx
-; AVX512-NEXT: vpextrd $2, %xmm6, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
-; AVX512-NEXT: vpextrd $3, %xmm5, %edx
-; AVX512-NEXT: vpextrd $3, %xmm6, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
-; AVX512-NEXT: vextracti32x4 $1, %zmm2, %xmm5
-; AVX512-NEXT: vpextrd $1, %xmm5, %edx
-; AVX512-NEXT: vextracti32x4 $1, %zmm0, %xmm6
-; AVX512-NEXT: vpextrd $1, %xmm6, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm5, %esi
-; AVX512-NEXT: vmovd %xmm6, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm7
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
-; AVX512-NEXT: vpextrd $2, %xmm5, %edx
-; AVX512-NEXT: vpextrd $2, %xmm6, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
-; AVX512-NEXT: vpextrd $3, %xmm5, %edx
-; AVX512-NEXT: vpextrd $3, %xmm6, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
-; AVX512-NEXT: vpextrd $1, %xmm2, %edx
-; AVX512-NEXT: vpextrd $1, %xmm0, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm2, %esi
-; AVX512-NEXT: vmovd %xmm0, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm6
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $2, %xmm2, %edx
-; AVX512-NEXT: vpextrd $2, %xmm0, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $3, %xmm2, %edx
-; AVX512-NEXT: vpextrd $3, %xmm0, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm6, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: vextracti32x4 $3, %zmm3, %xmm2
-; AVX512-NEXT: vpextrd $1, %xmm2, %edx
-; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm4
-; AVX512-NEXT: vpextrd $1, %xmm4, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm2, %esi
-; AVX512-NEXT: vmovd %xmm4, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm5
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $2, %xmm2, %edx
-; AVX512-NEXT: vpextrd $2, %xmm4, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $3, %xmm2, %edx
-; AVX512-NEXT: vpextrd $3, %xmm4, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2
-; AVX512-NEXT: vextracti32x4 $2, %zmm3, %xmm4
-; AVX512-NEXT: vpextrd $1, %xmm4, %edx
-; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm5
-; AVX512-NEXT: vpextrd $1, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm4, %esi
-; AVX512-NEXT: vmovd %xmm5, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm6
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $2, %xmm4, %edx
-; AVX512-NEXT: vpextrd $2, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $3, %xmm4, %edx
-; AVX512-NEXT: vpextrd $3, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX512-NEXT: vextracti32x4 $1, %zmm3, %xmm4
-; AVX512-NEXT: vpextrd $1, %xmm4, %edx
-; AVX512-NEXT: vextracti32x4 $1, %zmm1, %xmm5
-; AVX512-NEXT: vpextrd $1, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm4, %esi
-; AVX512-NEXT: vmovd %xmm5, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm6
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $2, %xmm4, %edx
-; AVX512-NEXT: vpextrd $2, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
-; AVX512-NEXT: vpextrd $3, %xmm4, %edx
-; AVX512-NEXT: vpextrd $3, %xmm5, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
-; AVX512-NEXT: vpextrd $1, %xmm3, %edx
-; AVX512-NEXT: vpextrd $1, %xmm1, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vmovd %xmm3, %esi
-; AVX512-NEXT: vmovd %xmm1, %edi
-; AVX512-NEXT: cmpl %esi, %edi
-; AVX512-NEXT: movl $0, %esi
-; AVX512-NEXT: cmovgl %ecx, %esi
-; AVX512-NEXT: vmovd %esi, %xmm5
-; AVX512-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $2, %xmm3, %edx
-; AVX512-NEXT: vpextrd $2, %xmm1, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgl %ecx, %edx
-; AVX512-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
-; AVX512-NEXT: vpextrd $3, %xmm3, %edx
-; AVX512-NEXT: vpextrd $3, %xmm1, %esi
-; AVX512-NEXT: cmpl %edx, %esi
-; AVX512-NEXT: cmovgl %ecx, %eax
-; AVX512-NEXT: vpinsrd $3, %eax, %xmm5, %xmm1
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
-; AVX512-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v32i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512F-NEXT: vpextrd $1, %xmm4, %ecx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm0, %xmm5
+; AVX512F-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512F-NEXT: xorl %eax, %eax
+; AVX512F-NEXT: cmpl %ecx, %edx
+; AVX512F-NEXT: movl $-1, %ecx
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm4, %esi
+; AVX512F-NEXT: vmovd %xmm5, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm6
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512F-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512F-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm6
+; AVX512F-NEXT: vpextrd $1, %xmm6, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm5, %esi
+; AVX512F-NEXT: vmovd %xmm6, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm7
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
+; AVX512F-NEXT: vpextrd $2, %xmm5, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm6, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
+; AVX512F-NEXT: vpextrd $3, %xmm5, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm6, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
+; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512F-NEXT: vextracti32x4 $1, %zmm2, %xmm5
+; AVX512F-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; AVX512F-NEXT: vpextrd $1, %xmm6, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm5, %esi
+; AVX512F-NEXT: vmovd %xmm6, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm7
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
+; AVX512F-NEXT: vpextrd $2, %xmm5, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm6, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
+; AVX512F-NEXT: vpextrd $3, %xmm5, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm6, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
+; AVX512F-NEXT: vpextrd $1, %xmm2, %edx
+; AVX512F-NEXT: vpextrd $1, %xmm0, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm2, %esi
+; AVX512F-NEXT: vmovd %xmm0, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm6
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $2, %xmm2, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm0, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $3, %xmm2, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm0, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm6, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512F-NEXT: vpextrd $1, %xmm2, %edx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512F-NEXT: vpextrd $1, %xmm4, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm2, %esi
+; AVX512F-NEXT: vmovd %xmm4, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm5
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512F-NEXT: vpextrd $2, %xmm2, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm4, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512F-NEXT: vpextrd $3, %xmm2, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm4, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2
+; AVX512F-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512F-NEXT: vpextrd $1, %xmm4, %edx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; AVX512F-NEXT: vpextrd $1, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm4, %esi
+; AVX512F-NEXT: vmovd %xmm5, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm6
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512F-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512F-NEXT: vpextrd $1, %xmm4, %edx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm1, %xmm5
+; AVX512F-NEXT: vpextrd $1, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm4, %esi
+; AVX512F-NEXT: vmovd %xmm5, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm6
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512F-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512F-NEXT: vpextrd $1, %xmm3, %edx
+; AVX512F-NEXT: vpextrd $1, %xmm1, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vmovd %xmm3, %esi
+; AVX512F-NEXT: vmovd %xmm1, %edi
+; AVX512F-NEXT: cmpl %esi, %edi
+; AVX512F-NEXT: movl $0, %esi
+; AVX512F-NEXT: cmovgl %ecx, %esi
+; AVX512F-NEXT: vmovd %esi, %xmm5
+; AVX512F-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512F-NEXT: vpextrd $2, %xmm3, %edx
+; AVX512F-NEXT: vpextrd $2, %xmm1, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgl %ecx, %edx
+; AVX512F-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512F-NEXT: vpextrd $3, %xmm3, %edx
+; AVX512F-NEXT: vpextrd $3, %xmm1, %esi
+; AVX512F-NEXT: cmpl %edx, %esi
+; AVX512F-NEXT: cmovgl %ecx, %eax
+; AVX512F-NEXT: vpinsrd $3, %eax, %xmm5, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v32i32:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512DQ-NEXT: vpextrd $1, %xmm4, %ecx
+; AVX512DQ-NEXT: vextracti32x4 $3, %zmm0, %xmm5
+; AVX512DQ-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512DQ-NEXT: xorl %eax, %eax
+; AVX512DQ-NEXT: cmpl %ecx, %edx
+; AVX512DQ-NEXT: movl $-1, %ecx
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm4, %esi
+; AVX512DQ-NEXT: vmovd %xmm5, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm6
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512DQ-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512DQ-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512DQ-NEXT: vextracti32x4 $2, %zmm0, %xmm6
+; AVX512DQ-NEXT: vpextrd $1, %xmm6, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm5, %esi
+; AVX512DQ-NEXT: vmovd %xmm6, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm7
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
+; AVX512DQ-NEXT: vpextrd $2, %xmm5, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm6, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
+; AVX512DQ-NEXT: vpextrd $3, %xmm5, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm6, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512DQ-NEXT: vextracti32x4 $1, %zmm2, %xmm5
+; AVX512DQ-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512DQ-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; AVX512DQ-NEXT: vpextrd $1, %xmm6, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm5, %esi
+; AVX512DQ-NEXT: vmovd %xmm6, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm7
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
+; AVX512DQ-NEXT: vpextrd $2, %xmm5, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm6, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
+; AVX512DQ-NEXT: vpextrd $3, %xmm5, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm6, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
+; AVX512DQ-NEXT: vpextrd $1, %xmm2, %edx
+; AVX512DQ-NEXT: vpextrd $1, %xmm0, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm2, %esi
+; AVX512DQ-NEXT: vmovd %xmm0, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm6
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $2, %xmm2, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm0, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $3, %xmm2, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm0, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm6, %xmm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm4, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512DQ-NEXT: vpextrd $1, %xmm2, %edx
+; AVX512DQ-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512DQ-NEXT: vpextrd $1, %xmm4, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm2, %esi
+; AVX512DQ-NEXT: vmovd %xmm4, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm5
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512DQ-NEXT: vpextrd $2, %xmm2, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm4, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512DQ-NEXT: vpextrd $3, %xmm2, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm4, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2
+; AVX512DQ-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512DQ-NEXT: vpextrd $1, %xmm4, %edx
+; AVX512DQ-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; AVX512DQ-NEXT: vpextrd $1, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm4, %esi
+; AVX512DQ-NEXT: vmovd %xmm5, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm6
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512DQ-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512DQ-NEXT: vpextrd $1, %xmm4, %edx
+; AVX512DQ-NEXT: vextracti32x4 $1, %zmm1, %xmm5
+; AVX512DQ-NEXT: vpextrd $1, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm4, %esi
+; AVX512DQ-NEXT: vmovd %xmm5, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm6
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512DQ-NEXT: vpextrd $1, %xmm3, %edx
+; AVX512DQ-NEXT: vpextrd $1, %xmm1, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vmovd %xmm3, %esi
+; AVX512DQ-NEXT: vmovd %xmm1, %edi
+; AVX512DQ-NEXT: cmpl %esi, %edi
+; AVX512DQ-NEXT: movl $0, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %esi
+; AVX512DQ-NEXT: vmovd %esi, %xmm5
+; AVX512DQ-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512DQ-NEXT: vpextrd $2, %xmm3, %edx
+; AVX512DQ-NEXT: vpextrd $2, %xmm1, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgl %ecx, %edx
+; AVX512DQ-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512DQ-NEXT: vpextrd $3, %xmm3, %edx
+; AVX512DQ-NEXT: vpextrd $3, %xmm1, %esi
+; AVX512DQ-NEXT: cmpl %edx, %esi
+; AVX512DQ-NEXT: cmovgl %ecx, %eax
+; AVX512DQ-NEXT: vpinsrd $3, %eax, %xmm5, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v32i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512BW-NEXT: vpextrd $1, %xmm4, %ecx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm5
+; AVX512BW-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512BW-NEXT: xorl %eax, %eax
+; AVX512BW-NEXT: cmpl %ecx, %edx
+; AVX512BW-NEXT: movl $-1, %ecx
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm4, %esi
+; AVX512BW-NEXT: vmovd %xmm5, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512BW-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm6
+; AVX512BW-NEXT: vpextrd $1, %xmm6, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm5, %esi
+; AVX512BW-NEXT: vmovd %xmm6, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm7
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrd $2, %xmm5, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm6, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrd $3, %xmm5, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm6, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm2, %xmm5
+; AVX512BW-NEXT: vpextrd $1, %xmm5, %edx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; AVX512BW-NEXT: vpextrd $1, %xmm6, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm5, %esi
+; AVX512BW-NEXT: vmovd %xmm6, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm7
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrd $2, %xmm5, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm6, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrd $3, %xmm5, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm6, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm7, %xmm5
+; AVX512BW-NEXT: vpextrd $1, %xmm2, %edx
+; AVX512BW-NEXT: vpextrd $1, %xmm0, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm2, %esi
+; AVX512BW-NEXT: vmovd %xmm0, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $2, %xmm2, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm0, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $3, %xmm2, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm0, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm6, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512BW-NEXT: vpextrd $1, %xmm2, %edx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512BW-NEXT: vpextrd $1, %xmm4, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm2, %esi
+; AVX512BW-NEXT: vmovd %xmm4, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm5
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrd $2, %xmm2, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm4, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrd $3, %xmm2, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm4, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm5, %xmm2
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrd $1, %xmm4, %edx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; AVX512BW-NEXT: vpextrd $1, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm4, %esi
+; AVX512BW-NEXT: vmovd %xmm5, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrd $1, %xmm4, %edx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm1, %xmm5
+; AVX512BW-NEXT: vpextrd $1, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm4, %esi
+; AVX512BW-NEXT: vmovd %xmm5, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $2, %xmm4, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrd $3, %xmm4, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm5, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $3, %edx, %xmm6, %xmm4
+; AVX512BW-NEXT: vpextrd $1, %xmm3, %edx
+; AVX512BW-NEXT: vpextrd $1, %xmm1, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vmovd %xmm3, %esi
+; AVX512BW-NEXT: vmovd %xmm1, %edi
+; AVX512BW-NEXT: cmpl %esi, %edi
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %esi
+; AVX512BW-NEXT: vmovd %esi, %xmm5
+; AVX512BW-NEXT: vpinsrd $1, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrd $2, %xmm3, %edx
+; AVX512BW-NEXT: vpextrd $2, %xmm1, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgl %ecx, %edx
+; AVX512BW-NEXT: vpinsrd $2, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrd $3, %xmm3, %edx
+; AVX512BW-NEXT: vpextrd $3, %xmm1, %esi
+; AVX512BW-NEXT: cmpl %edx, %esi
+; AVX512BW-NEXT: cmovgl %ecx, %eax
+; AVX512BW-NEXT: vpinsrd $3, %eax, %xmm5, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovdw %zmm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i32> %a0, %a1
ret <32 x i1> %1
}
@@ -4342,291 +5779,987 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v64i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
-; AVX512-NEXT: vpmovsxwd %ymm3, %zmm3
-; AVX512-NEXT: vpslld $31, %zmm3, %zmm3
-; AVX512-NEXT: vptestmd %zmm3, %zmm3, %k0
-; AVX512-NEXT: kshiftlw $14, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: kshiftlw $15, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm3
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $13, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $12, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $11, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $10, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $9, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $8, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $7, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $6, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $5, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $4, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $3, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $2, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftlw $1, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
-; AVX512-NEXT: kshiftrw $15, %k0, %k0
-; AVX512-NEXT: kmovw %k0, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
-; AVX512-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm2
-; AVX512-NEXT: vpmovsxwd %ymm2, %zmm2
-; AVX512-NEXT: vpslld $31, %zmm2, %zmm2
-; AVX512-NEXT: vptestmd %zmm2, %zmm2, %k0
-; AVX512-NEXT: kshiftlw $14, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: kshiftlw $15, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm2
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $13, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $12, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $11, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $10, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $9, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $8, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $7, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $6, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $5, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $4, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $3, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $2, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftlw $1, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX512-NEXT: kshiftrw $15, %k0, %k0
-; AVX512-NEXT: kmovw %k0, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
-; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX512-NEXT: vpsllw $7, %ymm2, %ymm2
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
-; AVX512-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512-NEXT: vpxor %ymm6, %ymm6, %ymm6
-; AVX512-NEXT: vpcmpgtb %ymm2, %ymm6, %ymm2
-; AVX512-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
-; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
-; AVX512-NEXT: vpslld $31, %zmm1, %zmm1
-; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k0
-; AVX512-NEXT: kshiftlw $14, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: kshiftlw $15, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm1
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $13, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $12, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $11, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $10, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $9, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $8, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $7, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $6, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $5, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $4, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $3, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $2, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftlw $1, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX512-NEXT: kshiftrw $15, %k0, %k0
-; AVX512-NEXT: kmovw %k0, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; AVX512-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
-; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
-; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
-; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
-; AVX512-NEXT: kshiftlw $14, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: kshiftlw $15, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %ecx
-; AVX512-NEXT: vmovd %ecx, %xmm0
-; AVX512-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $13, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $12, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $11, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $10, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $9, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $8, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $7, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $6, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $5, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $4, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $3, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $2, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftlw $1, %k0, %k1
-; AVX512-NEXT: kshiftrw $15, %k1, %k1
-; AVX512-NEXT: kmovw %k1, %eax
-; AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
-; AVX512-NEXT: kshiftrw $15, %k0, %k0
-; AVX512-NEXT: kmovw %k0, %eax
-; AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: vpsllw $7, %ymm0, %ymm0
-; AVX512-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpcmpgtb %ymm0, %ymm6, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<kill>
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v64i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3
+; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm3
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; AVX512F-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm2
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsllw $7, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpxor %ymm6, %ymm6, %ymm6
+; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm6, %ymm2
+; AVX512F-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm1
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX512F-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kshiftlw $14, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: kshiftlw $15, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %ecx
+; AVX512F-NEXT: vmovd %ecx, %xmm0
+; AVX512F-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $13, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $12, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $11, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $10, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $9, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $8, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $7, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $6, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $5, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $4, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $3, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $2, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftlw $1, %k0, %k1
+; AVX512F-NEXT: kshiftrw $15, %k1, %k1
+; AVX512F-NEXT: kmovw %k1, %eax
+; AVX512F-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: kshiftrw $15, %k0, %k0
+; AVX512F-NEXT: kmovw %k0, %eax
+; AVX512F-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm6, %ymm0
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<kill>
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v64i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpmovsxwd %ymm3, %zmm3
+; AVX512DQ-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm3
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm3, %xmm3
+; AVX512DQ-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpmovsxwd %ymm2, %zmm2
+; AVX512DQ-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512DQ-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm2
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm2, %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $7, %ymm2, %ymm2
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
+; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpxor %ymm6, %ymm6, %ymm6
+; AVX512DQ-NEXT: vpcmpgtb %ymm2, %ymm6, %ymm2
+; AVX512DQ-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512DQ-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm1
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kshiftlw $14, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: kshiftlw $15, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
+; AVX512DQ-NEXT: vmovd %ecx, %xmm0
+; AVX512DQ-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $13, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $12, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $11, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $10, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $9, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $8, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $7, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $6, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $5, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $4, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $3, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $2, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftlw $1, %k0, %k1
+; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %eax
+; AVX512DQ-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
+; AVX512DQ-NEXT: kmovw %k0, %eax
+; AVX512DQ-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $7, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpand %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpgtb %ymm0, %ymm6, %ymm0
+; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<kill>
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v64i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm4, %ecx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm5
+; AVX512BW-NEXT: vpextrw $1, %xmm5, %edx
+; AVX512BW-NEXT: xorl %eax, %eax
+; AVX512BW-NEXT: cmpw %cx, %dx
+; AVX512BW-NEXT: movw $-1, %cx
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm4, %esi
+; AVX512BW-NEXT: vmovd %xmm5, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $2, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $3, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $4, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $5, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $6, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $7, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $7, %edx, %xmm6, %xmm4
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512BW-NEXT: vpextrw $1, %xmm5, %edx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm6
+; AVX512BW-NEXT: vpextrw $1, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm5, %esi
+; AVX512BW-NEXT: vmovd %xmm6, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm7
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $2, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $3, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $4, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $5, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $6, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $7, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $7, %edx, %xmm7, %xmm5
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm2, %xmm5
+; AVX512BW-NEXT: vpextrw $1, %xmm5, %edx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm6
+; AVX512BW-NEXT: vpextrw $1, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm5, %esi
+; AVX512BW-NEXT: vmovd %xmm6, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm7
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $2, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $3, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $4, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $5, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $6, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm7, %xmm7
+; AVX512BW-NEXT: vpextrw $7, %xmm5, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm6, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $7, %edx, %xmm7, %xmm5
+; AVX512BW-NEXT: vpextrw $1, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $1, %xmm0, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm2, %esi
+; AVX512BW-NEXT: vmovd %xmm0, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $2, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm0, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $3, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm0, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $4, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm0, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $5, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm0, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $6, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm0, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $7, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm0, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $7, %edx, %xmm6, %xmm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512BW-NEXT: vpextrw $1, %xmm2, %edx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm4, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm2, %esi
+; AVX512BW-NEXT: vmovd %xmm4, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm5
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $2, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm4, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $3, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm4, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $4, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm4, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $5, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm4, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $6, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm4, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $7, %xmm2, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm4, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $7, %edx, %xmm5, %xmm2
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm4, %edx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm5
+; AVX512BW-NEXT: vpextrw $1, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm4, %esi
+; AVX512BW-NEXT: vmovd %xmm5, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $2, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $3, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $4, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $5, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $6, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $7, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $7, %edx, %xmm6, %xmm4
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm4, %edx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm1, %xmm5
+; AVX512BW-NEXT: vpextrw $1, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm4, %esi
+; AVX512BW-NEXT: vmovd %xmm5, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm6
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $2, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $3, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $4, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $5, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $6, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm6, %xmm6
+; AVX512BW-NEXT: vpextrw $7, %xmm4, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm5, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $7, %edx, %xmm6, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm3, %edx
+; AVX512BW-NEXT: vpextrw $1, %xmm1, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vmovd %xmm3, %esi
+; AVX512BW-NEXT: vmovd %xmm1, %edi
+; AVX512BW-NEXT: cmpw %si, %di
+; AVX512BW-NEXT: movl $0, %esi
+; AVX512BW-NEXT: cmovgw %cx, %si
+; AVX512BW-NEXT: vmovd %esi, %xmm5
+; AVX512BW-NEXT: vpinsrw $1, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $2, %xmm3, %edx
+; AVX512BW-NEXT: vpextrw $2, %xmm1, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $2, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $3, %xmm3, %edx
+; AVX512BW-NEXT: vpextrw $3, %xmm1, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $3, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $4, %xmm3, %edx
+; AVX512BW-NEXT: vpextrw $4, %xmm1, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $4, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $5, %xmm3, %edx
+; AVX512BW-NEXT: vpextrw $5, %xmm1, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $5, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $6, %xmm3, %edx
+; AVX512BW-NEXT: vpextrw $6, %xmm1, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgw %cx, %dx
+; AVX512BW-NEXT: vpinsrw $6, %edx, %xmm5, %xmm5
+; AVX512BW-NEXT: vpextrw $7, %xmm3, %edx
+; AVX512BW-NEXT: vpextrw $7, %xmm1, %esi
+; AVX512BW-NEXT: cmpw %dx, %si
+; AVX512BW-NEXT: cmovgw %cx, %ax
+; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm5, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm4, %ymm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <64 x i16> %a0, %a1
ret <64 x i1> %1
}
@@ -6240,50 +8373,103 @@ define <128 x i1> @test_cmp_v128i8(<128 x i8> %a0, <128 x i8> %a1) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v128i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
-; AVX512-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
-; AVX512-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
-; AVX512-NEXT: vpcmpgtb %ymm7, %ymm3, %ymm3
-; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX512-NEXT: vpmovsxbd %xmm4, %zmm4
-; AVX512-NEXT: vpslld $31, %zmm4, %zmm4
-; AVX512-NEXT: vptestmd %zmm4, %zmm4, %k0
-; AVX512-NEXT: kmovw %k0, 14(%rdi)
-; AVX512-NEXT: vpmovsxbd %xmm3, %zmm3
-; AVX512-NEXT: vpslld $31, %zmm3, %zmm3
-; AVX512-NEXT: vptestmd %zmm3, %zmm3, %k0
-; AVX512-NEXT: kmovw %k0, 12(%rdi)
-; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512-NEXT: vpmovsxbd %xmm3, %zmm3
-; AVX512-NEXT: vpslld $31, %zmm3, %zmm3
-; AVX512-NEXT: vptestmd %zmm3, %zmm3, %k0
-; AVX512-NEXT: kmovw %k0, 10(%rdi)
-; AVX512-NEXT: vpmovsxbd %xmm2, %zmm2
-; AVX512-NEXT: vpslld $31, %zmm2, %zmm2
-; AVX512-NEXT: vptestmd %zmm2, %zmm2, %k0
-; AVX512-NEXT: kmovw %k0, 8(%rdi)
-; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512-NEXT: vpmovsxbd %xmm2, %zmm2
-; AVX512-NEXT: vpslld $31, %zmm2, %zmm2
-; AVX512-NEXT: vptestmd %zmm2, %zmm2, %k0
-; AVX512-NEXT: kmovw %k0, 6(%rdi)
-; AVX512-NEXT: vpmovsxbd %xmm1, %zmm1
-; AVX512-NEXT: vpslld $31, %zmm1, %zmm1
-; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k0
-; AVX512-NEXT: kmovw %k0, 4(%rdi)
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpmovsxbd %xmm1, %zmm1
-; AVX512-NEXT: vpslld $31, %zmm1, %zmm1
-; AVX512-NEXT: vptestmd %zmm1, %zmm1, %k0
-; AVX512-NEXT: kmovw %k0, 2(%rdi)
-; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
-; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
-; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k0
-; AVX512-NEXT: kmovw %k0, (%rdi)
-; AVX512-NEXT: movq %rdi, %rax
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v128i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpcmpgtb %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX512F-NEXT: vpmovsxbd %xmm4, %zmm4
+; AVX512F-NEXT: vpslld $31, %zmm4, %zmm4
+; AVX512F-NEXT: vptestmd %zmm4, %zmm4, %k0
+; AVX512F-NEXT: kmovw %k0, 14(%rdi)
+; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3
+; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512F-NEXT: kmovw %k0, 12(%rdi)
+; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3
+; AVX512F-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512F-NEXT: kmovw %k0, 10(%rdi)
+; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512F-NEXT: kmovw %k0, 8(%rdi)
+; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512F-NEXT: kmovw %k0, 6(%rdi)
+; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kmovw %k0, 4(%rdi)
+; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512F-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512F-NEXT: kmovw %k0, 2(%rdi)
+; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512F-NEXT: kmovw %k0, (%rdi)
+; AVX512F-NEXT: movq %rdi, %rax
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v128i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpcmpgtb %ymm5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpcmpgtb %ymm6, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpcmpgtb %ymm7, %ymm3, %ymm3
+; AVX512DQ-NEXT: vextracti128 $1, %ymm3, %xmm4
+; AVX512DQ-NEXT: vpmovsxbd %xmm4, %zmm4
+; AVX512DQ-NEXT: vpslld $31, %zmm4, %zmm4
+; AVX512DQ-NEXT: vptestmd %zmm4, %zmm4, %k0
+; AVX512DQ-NEXT: kmovw %k0, 14(%rdi)
+; AVX512DQ-NEXT: vpmovsxbd %xmm3, %zmm3
+; AVX512DQ-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512DQ-NEXT: kmovw %k0, 12(%rdi)
+; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
+; AVX512DQ-NEXT: vpmovsxbd %xmm3, %zmm3
+; AVX512DQ-NEXT: vpslld $31, %zmm3, %zmm3
+; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k0
+; AVX512DQ-NEXT: kmovw %k0, 10(%rdi)
+; AVX512DQ-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512DQ-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512DQ-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512DQ-NEXT: kmovw %k0, 8(%rdi)
+; AVX512DQ-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-NEXT: vpmovsxbd %xmm2, %zmm2
+; AVX512DQ-NEXT: vpslld $31, %zmm2, %zmm2
+; AVX512DQ-NEXT: vptestmd %zmm2, %zmm2, %k0
+; AVX512DQ-NEXT: kmovw %k0, 6(%rdi)
+; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512DQ-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512DQ-NEXT: kmovw %k0, 4(%rdi)
+; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512DQ-NEXT: vpmovsxbd %xmm1, %zmm1
+; AVX512DQ-NEXT: vpslld $31, %zmm1, %zmm1
+; AVX512DQ-NEXT: vptestmd %zmm1, %zmm1, %k0
+; AVX512DQ-NEXT: kmovw %k0, 2(%rdi)
+; AVX512DQ-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512DQ-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
+; AVX512DQ-NEXT: kmovw %k0, (%rdi)
+; AVX512DQ-NEXT: movq %rdi, %rax
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v128i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpcmpgtb %zmm3, %zmm1, %k0
+; AVX512BW-NEXT: vpcmpgtb %zmm2, %zmm0, %k1
+; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
+; AVX512BW-NEXT: vpmovm2b %k0, %zmm1
+; AVX512BW-NEXT: retq
%1 = icmp sgt <128 x i8> %a0, %a1
ret <128 x i1> %1
}
@@ -6781,231 +8967,684 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v32f64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vextractf32x4 $3, %zmm4, %xmm8
-; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm9
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: vucomisd %xmm8, %xmm9
-; AVX512-NEXT: movq $-1, %rcx
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm8 = xmm8[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
-; AVX512-NEXT: vucomisd %xmm8, %xmm9
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm8
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm10[0],xmm8[0]
-; AVX512-NEXT: vextractf32x4 $2, %zmm4, %xmm9
-; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm10
-; AVX512-NEXT: vucomisd %xmm9, %xmm10
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm11
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
-; AVX512-NEXT: vucomisd %xmm9, %xmm10
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm9
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
-; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
-; AVX512-NEXT: vextractf32x4 $1, %zmm4, %xmm9
-; AVX512-NEXT: vextractf32x4 $1, %zmm0, %xmm10
-; AVX512-NEXT: vucomisd %xmm9, %xmm10
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm11
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
-; AVX512-NEXT: vucomisd %xmm9, %xmm10
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm9
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
-; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm8
-; AVX512-NEXT: vextractf32x4 $3, %zmm5, %xmm4
-; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm0
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm9
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm0[0]
-; AVX512-NEXT: vextractf32x4 $2, %zmm5, %xmm4
-; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm0
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
-; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512-NEXT: vextractf32x4 $1, %zmm5, %xmm4
-; AVX512-NEXT: vextractf32x4 $1, %zmm1, %xmm0
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm0
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
-; AVX512-NEXT: vucomisd %xmm5, %xmm1
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512-NEXT: vucomisd %xmm5, %xmm1
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm8
-; AVX512-NEXT: vextractf32x4 $3, %zmm6, %xmm1
-; AVX512-NEXT: vextractf32x4 $3, %zmm2, %xmm4
-; AVX512-NEXT: vucomisd %xmm1, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vucomisd %xmm1, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
-; AVX512-NEXT: vextractf32x4 $2, %zmm6, %xmm4
-; AVX512-NEXT: vextractf32x4 $2, %zmm2, %xmm5
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
-; AVX512-NEXT: vucomisd %xmm4, %xmm5
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX512-NEXT: vextractf32x4 $1, %zmm6, %xmm1
-; AVX512-NEXT: vextractf32x4 $1, %zmm2, %xmm4
-; AVX512-NEXT: vucomisd %xmm1, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vucomisd %xmm1, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
-; AVX512-NEXT: vucomisd %xmm6, %xmm2
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT: vucomisd %xmm5, %xmm2
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vextractf32x4 $3, %zmm7, %xmm1
-; AVX512-NEXT: vextractf32x4 $3, %zmm3, %xmm2
-; AVX512-NEXT: vucomisd %xmm1, %xmm2
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT: vucomisd %xmm1, %xmm2
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
-; AVX512-NEXT: vextractf32x4 $2, %zmm7, %xmm2
-; AVX512-NEXT: vextractf32x4 $2, %zmm3, %xmm4
-; AVX512-NEXT: vucomisd %xmm2, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vucomisd %xmm2, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX512-NEXT: vextractf32x4 $1, %zmm7, %xmm2
-; AVX512-NEXT: vextractf32x4 $1, %zmm3, %xmm4
-; AVX512-NEXT: vucomisd %xmm2, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
-; AVX512-NEXT: vucomisd %xmm2, %xmm4
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
-; AVX512-NEXT: vucomisd %xmm7, %xmm3
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovaq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm5 = xmm7[1,0]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
-; AVX512-NEXT: vucomisd %xmm5, %xmm3
-; AVX512-NEXT: cmovaq %rcx, %rax
-; AVX512-NEXT: vmovq %rax, %xmm3
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-NEXT: vpmovqd %zmm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v32f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextractf32x4 $3, %zmm4, %xmm8
+; AVX512F-NEXT: vextractf32x4 $3, %zmm0, %xmm9
+; AVX512F-NEXT: xorl %eax, %eax
+; AVX512F-NEXT: vucomisd %xmm8, %xmm9
+; AVX512F-NEXT: movq $-1, %rcx
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm8 = xmm8[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512F-NEXT: vucomisd %xmm8, %xmm9
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm8
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm10[0],xmm8[0]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm4, %xmm9
+; AVX512F-NEXT: vextractf32x4 $2, %zmm0, %xmm10
+; AVX512F-NEXT: vucomisd %xmm9, %xmm10
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm11
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
+; AVX512F-NEXT: vucomisd %xmm9, %xmm10
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm9
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512F-NEXT: vextractf32x4 $1, %zmm4, %xmm9
+; AVX512F-NEXT: vextractf32x4 $1, %zmm0, %xmm10
+; AVX512F-NEXT: vucomisd %xmm9, %xmm10
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm11
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
+; AVX512F-NEXT: vucomisd %xmm9, %xmm10
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm9
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm8
+; AVX512F-NEXT: vextractf32x4 $3, %zmm5, %xmm4
+; AVX512F-NEXT: vextractf32x4 $3, %zmm1, %xmm0
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm9
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm0[0]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm5, %xmm4
+; AVX512F-NEXT: vextractf32x4 $2, %zmm1, %xmm0
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-NEXT: vextractf32x4 $1, %zmm5, %xmm4
+; AVX512F-NEXT: vextractf32x4 $1, %zmm1, %xmm0
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm0
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512F-NEXT: vucomisd %xmm5, %xmm1
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512F-NEXT: vucomisd %xmm5, %xmm1
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm8
+; AVX512F-NEXT: vextractf32x4 $3, %zmm6, %xmm1
+; AVX512F-NEXT: vextractf32x4 $3, %zmm2, %xmm4
+; AVX512F-NEXT: vucomisd %xmm1, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vucomisd %xmm1, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm6, %xmm4
+; AVX512F-NEXT: vextractf32x4 $2, %zmm2, %xmm5
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512F-NEXT: vucomisd %xmm4, %xmm5
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512F-NEXT: vextractf32x4 $1, %zmm6, %xmm1
+; AVX512F-NEXT: vextractf32x4 $1, %zmm2, %xmm4
+; AVX512F-NEXT: vucomisd %xmm1, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vucomisd %xmm1, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512F-NEXT: vucomisd %xmm6, %xmm2
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512F-NEXT: vucomisd %xmm5, %xmm2
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm2
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vextractf32x4 $3, %zmm7, %xmm1
+; AVX512F-NEXT: vextractf32x4 $3, %zmm3, %xmm2
+; AVX512F-NEXT: vucomisd %xmm1, %xmm2
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512F-NEXT: vucomisd %xmm1, %xmm2
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
+; AVX512F-NEXT: vextractf32x4 $2, %zmm7, %xmm2
+; AVX512F-NEXT: vextractf32x4 $2, %zmm3, %xmm4
+; AVX512F-NEXT: vucomisd %xmm2, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vucomisd %xmm2, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm2
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512F-NEXT: vextractf32x4 $1, %zmm7, %xmm2
+; AVX512F-NEXT: vextractf32x4 $1, %zmm3, %xmm4
+; AVX512F-NEXT: vucomisd %xmm2, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512F-NEXT: vucomisd %xmm2, %xmm4
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm2
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512F-NEXT: vucomisd %xmm7, %xmm3
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovaq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm5 = xmm7[1,0]
+; AVX512F-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; AVX512F-NEXT: vucomisd %xmm5, %xmm3
+; AVX512F-NEXT: cmovaq %rcx, %rax
+; AVX512F-NEXT: vmovq %rax, %xmm3
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v32f64:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm4, %xmm8
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm0, %xmm9
+; AVX512DQ-NEXT: xorl %eax, %eax
+; AVX512DQ-NEXT: vucomisd %xmm8, %xmm9
+; AVX512DQ-NEXT: movq $-1, %rcx
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm8 = xmm8[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm8, %xmm9
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm8
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm10[0],xmm8[0]
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm4, %xmm9
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm0, %xmm10
+; AVX512DQ-NEXT: vucomisd %xmm9, %xmm10
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm11
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm9, %xmm10
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm9
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm4, %xmm9
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm0, %xmm10
+; AVX512DQ-NEXT: vucomisd %xmm9, %xmm10
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm11
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm9, %xmm10
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm9
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm8
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm5, %xmm4
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm1, %xmm0
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm9
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm0[0]
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm5, %xmm4
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm1, %xmm0
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm5, %xmm4
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm1, %xmm0
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm0
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm1
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm1
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm8, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm8
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm6, %xmm1
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm2, %xmm4
+; AVX512DQ-NEXT: vucomisd %xmm1, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm1, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm6, %xmm4
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm2, %xmm5
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm4, %xmm5
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm6, %xmm1
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm2, %xmm4
+; AVX512DQ-NEXT: vucomisd %xmm1, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm1, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512DQ-NEXT: vucomisd %xmm6, %xmm2
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm2
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm2
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm7, %xmm1
+; AVX512DQ-NEXT: vextractf64x2 $3, %zmm3, %xmm2
+; AVX512DQ-NEXT: vucomisd %xmm1, %xmm2
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm1, %xmm2
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm7, %xmm2
+; AVX512DQ-NEXT: vextractf64x2 $2, %zmm3, %xmm4
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm2
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm7, %xmm2
+; AVX512DQ-NEXT: vextractf64x2 $1, %zmm3, %xmm4
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm2, %xmm4
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm2
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512DQ-NEXT: vucomisd %xmm7, %xmm3
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovaq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm5 = xmm7[1,0]
+; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; AVX512DQ-NEXT: vucomisd %xmm5, %xmm3
+; AVX512DQ-NEXT: cmovaq %rcx, %rax
+; AVX512DQ-NEXT: vmovq %rax, %xmm3
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512DQ-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v32f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm4, %xmm8
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm9
+; AVX512BW-NEXT: xorl %eax, %eax
+; AVX512BW-NEXT: vucomisd %xmm8, %xmm9
+; AVX512BW-NEXT: movq $-1, %rcx
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm8 = xmm8[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512BW-NEXT: vucomisd %xmm8, %xmm9
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm8
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm10[0],xmm8[0]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm4, %xmm9
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm10
+; AVX512BW-NEXT: vucomisd %xmm9, %xmm10
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm11
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
+; AVX512BW-NEXT: vucomisd %xmm9, %xmm10
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm9
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm4, %xmm9
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm0, %xmm10
+; AVX512BW-NEXT: vucomisd %xmm9, %xmm10
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm11
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm9 = xmm9[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm10 = xmm10[1,0]
+; AVX512BW-NEXT: vucomisd %xmm9, %xmm10
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm9
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm9[0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm8
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm5, %xmm4
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm1, %xmm0
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm9
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm0[0]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm5, %xmm4
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm1, %xmm0
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm5, %xmm4
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm1, %xmm0
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm0
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm10[0],xmm0[0]
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm1
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm1
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm8
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm6, %xmm1
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm2, %xmm4
+; AVX512BW-NEXT: vucomisd %xmm1, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vucomisd %xmm1, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm6, %xmm4
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm2, %xmm5
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
+; AVX512BW-NEXT: vucomisd %xmm4, %xmm5
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm6, %xmm1
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm2, %xmm4
+; AVX512BW-NEXT: vucomisd %xmm1, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vucomisd %xmm1, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm1[0]
+; AVX512BW-NEXT: vucomisd %xmm6, %xmm2
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm6[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm2
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm2
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm7, %xmm1
+; AVX512BW-NEXT: vextractf32x4 $3, %zmm3, %xmm2
+; AVX512BW-NEXT: vucomisd %xmm1, %xmm2
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512BW-NEXT: vucomisd %xmm1, %xmm2
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm7, %xmm2
+; AVX512BW-NEXT: vextractf32x4 $2, %zmm3, %xmm4
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm2
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm7, %xmm2
+; AVX512BW-NEXT: vextractf32x4 $1, %zmm3, %xmm4
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; AVX512BW-NEXT: vucomisd %xmm2, %xmm4
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm2
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm5[0],xmm2[0]
+; AVX512BW-NEXT: vucomisd %xmm7, %xmm3
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovaq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm5 = xmm7[1,0]
+; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; AVX512BW-NEXT: vucomisd %xmm5, %xmm3
+; AVX512BW-NEXT: cmovaq %rcx, %rax
+; AVX512BW-NEXT: vmovq %rax, %xmm3
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
+; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x double> %a0, %a1
ret <32 x i1> %1
}
@@ -7639,263 +10278,780 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
-; AVX512-LABEL: test_cmp_v32i64:
-; AVX512: # BB#0:
-; AVX512-NEXT: vextracti32x4 $3, %zmm4, %xmm8
-; AVX512-NEXT: vpextrq $1, %xmm8, %rcx
-; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm9
-; AVX512-NEXT: vpextrq $1, %xmm9, %rdx
-; AVX512-NEXT: xorl %eax, %eax
-; AVX512-NEXT: cmpq %rcx, %rdx
-; AVX512-NEXT: movq $-1, %rcx
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vmovq %xmm8, %rdx
-; AVX512-NEXT: vmovq %xmm9, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm8
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
-; AVX512-NEXT: vextracti32x4 $2, %zmm4, %xmm9
-; AVX512-NEXT: vpextrq $1, %xmm9, %rdx
-; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm10
-; AVX512-NEXT: vpextrq $1, %xmm10, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm11
-; AVX512-NEXT: vmovq %xmm9, %rdx
-; AVX512-NEXT: vmovq %xmm10, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm9
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
-; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
-; AVX512-NEXT: vextracti32x4 $1, %zmm4, %xmm9
-; AVX512-NEXT: vpextrq $1, %xmm9, %rdx
-; AVX512-NEXT: vextracti32x4 $1, %zmm0, %xmm10
-; AVX512-NEXT: vpextrq $1, %xmm10, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm11
-; AVX512-NEXT: vmovq %xmm9, %rdx
-; AVX512-NEXT: vmovq %xmm10, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm9
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
-; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm8
-; AVX512-NEXT: vextracti32x4 $3, %zmm5, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm9
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm0[0],xmm9[0]
-; AVX512-NEXT: vextracti32x4 $2, %zmm5, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: vmovq %xmm0, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
-; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
-; AVX512-NEXT: vextracti32x4 $1, %zmm5, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: vextracti32x4 $1, %zmm1, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm10
-; AVX512-NEXT: vmovq %xmm0, %rdx
-; AVX512-NEXT: vmovq %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
-; AVX512-NEXT: vpextrq $1, %xmm5, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm1, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vmovq %xmm5, %rdx
-; AVX512-NEXT: vmovq %xmm1, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm8
-; AVX512-NEXT: vextracti32x4 $3, %zmm6, %xmm1
-; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
-; AVX512-NEXT: vextracti32x4 $3, %zmm2, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm1, %rdx
-; AVX512-NEXT: vmovq %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm1
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
-; AVX512-NEXT: vextracti32x4 $2, %zmm6, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rdx
-; AVX512-NEXT: vextracti32x4 $2, %zmm2, %xmm5
-; AVX512-NEXT: vpextrq $1, %xmm5, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vmovq %xmm4, %rdx
-; AVX512-NEXT: vmovq %xmm5, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm4[0],xmm0[0]
-; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512-NEXT: vextracti32x4 $1, %zmm6, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: vextracti32x4 $1, %zmm2, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm0, %rdx
-; AVX512-NEXT: vmovq %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; AVX512-NEXT: vpextrq $1, %xmm6, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm2, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vmovq %xmm6, %rdx
-; AVX512-NEXT: vmovq %xmm2, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm1
-; AVX512-NEXT: vextracti32x4 $3, %zmm7, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: vextracti32x4 $3, %zmm3, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vmovq %xmm0, %rdx
-; AVX512-NEXT: vmovq %xmm2, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX512-NEXT: vextracti32x4 $2, %zmm7, %xmm2
-; AVX512-NEXT: vpextrq $1, %xmm2, %rdx
-; AVX512-NEXT: vextracti32x4 $2, %zmm3, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm2, %rdx
-; AVX512-NEXT: vmovq %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm2
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm2
-; AVX512-NEXT: vextracti32x4 $1, %zmm7, %xmm0
-; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
-; AVX512-NEXT: vextracti32x4 $1, %zmm3, %xmm4
-; AVX512-NEXT: vpextrq $1, %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm5
-; AVX512-NEXT: vmovq %xmm0, %rdx
-; AVX512-NEXT: vmovq %xmm4, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm0
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
-; AVX512-NEXT: vpextrq $1, %xmm7, %rdx
-; AVX512-NEXT: vpextrq $1, %xmm3, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: movl $0, %edx
-; AVX512-NEXT: cmovgq %rcx, %rdx
-; AVX512-NEXT: vmovq %rdx, %xmm4
-; AVX512-NEXT: vmovq %xmm7, %rdx
-; AVX512-NEXT: vmovq %xmm3, %rsi
-; AVX512-NEXT: cmpq %rdx, %rsi
-; AVX512-NEXT: cmovgq %rcx, %rax
-; AVX512-NEXT: vmovq %rax, %xmm3
-; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
-; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: test_cmp_v32i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vextracti32x4 $3, %zmm4, %xmm8
+; AVX512F-NEXT: vpextrq $1, %xmm8, %rcx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm0, %xmm9
+; AVX512F-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512F-NEXT: xorl %eax, %eax
+; AVX512F-NEXT: cmpq %rcx, %rdx
+; AVX512F-NEXT: movq $-1, %rcx
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vmovq %xmm8, %rdx
+; AVX512F-NEXT: vmovq %xmm9, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm8
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
+; AVX512F-NEXT: vextracti32x4 $2, %zmm4, %xmm9
+; AVX512F-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm10
+; AVX512F-NEXT: vpextrq $1, %xmm10, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm11
+; AVX512F-NEXT: vmovq %xmm9, %rdx
+; AVX512F-NEXT: vmovq %xmm10, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm9
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512F-NEXT: vextracti32x4 $1, %zmm4, %xmm9
+; AVX512F-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm10
+; AVX512F-NEXT: vpextrq $1, %xmm10, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm11
+; AVX512F-NEXT: vmovq %xmm9, %rdx
+; AVX512F-NEXT: vmovq %xmm10, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm9
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vmovq %xmm4, %rdx
+; AVX512F-NEXT: vmovq %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm8
+; AVX512F-NEXT: vextracti32x4 $3, %zmm5, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm1, %xmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm9
+; AVX512F-NEXT: vmovq %xmm4, %rdx
+; AVX512F-NEXT: vmovq %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm0[0],xmm9[0]
+; AVX512F-NEXT: vextracti32x4 $2, %zmm5, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm1, %xmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vmovq %xmm4, %rdx
+; AVX512F-NEXT: vmovq %xmm0, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512F-NEXT: vextracti32x4 $1, %zmm5, %xmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm1, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm10
+; AVX512F-NEXT: vmovq %xmm0, %rdx
+; AVX512F-NEXT: vmovq %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512F-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512F-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vmovq %xmm5, %rdx
+; AVX512F-NEXT: vmovq %xmm1, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm8
+; AVX512F-NEXT: vextracti32x4 $3, %zmm6, %xmm1
+; AVX512F-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vmovq %xmm1, %rdx
+; AVX512F-NEXT: vmovq %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm1
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; AVX512F-NEXT: vextracti32x4 $2, %zmm6, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512F-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vmovq %xmm4, %rdx
+; AVX512F-NEXT: vmovq %xmm5, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm4[0],xmm0[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512F-NEXT: vextracti32x4 $1, %zmm6, %xmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm2, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vmovq %xmm0, %rdx
+; AVX512F-NEXT: vmovq %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX512F-NEXT: vpextrq $1, %xmm6, %rdx
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vmovq %xmm6, %rdx
+; AVX512F-NEXT: vmovq %xmm2, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm2
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm1
+; AVX512F-NEXT: vextracti32x4 $3, %zmm7, %xmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512F-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vmovq %xmm0, %rdx
+; AVX512F-NEXT: vmovq %xmm2, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX512F-NEXT: vextracti32x4 $2, %zmm7, %xmm2
+; AVX512F-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512F-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vmovq %xmm2, %rdx
+; AVX512F-NEXT: vmovq %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm2
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm2
+; AVX512F-NEXT: vextracti32x4 $1, %zmm7, %xmm0
+; AVX512F-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512F-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512F-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm5
+; AVX512F-NEXT: vmovq %xmm0, %rdx
+; AVX512F-NEXT: vmovq %xmm4, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm0
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX512F-NEXT: vpextrq $1, %xmm7, %rdx
+; AVX512F-NEXT: vpextrq $1, %xmm3, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: movl $0, %edx
+; AVX512F-NEXT: cmovgq %rcx, %rdx
+; AVX512F-NEXT: vmovq %rdx, %xmm4
+; AVX512F-NEXT: vmovq %xmm7, %rdx
+; AVX512F-NEXT: vmovq %xmm3, %rsi
+; AVX512F-NEXT: cmpq %rdx, %rsi
+; AVX512F-NEXT: cmovgq %rcx, %rax
+; AVX512F-NEXT: vmovq %rax, %xmm3
+; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512F-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512DQ-LABEL: test_cmp_v32i64:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm4, %xmm8
+; AVX512DQ-NEXT: vpextrq $1, %xmm8, %rcx
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm0, %xmm9
+; AVX512DQ-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512DQ-NEXT: xorl %eax, %eax
+; AVX512DQ-NEXT: cmpq %rcx, %rdx
+; AVX512DQ-NEXT: movq $-1, %rcx
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vmovq %xmm8, %rdx
+; AVX512DQ-NEXT: vmovq %xmm9, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm8
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm4, %xmm9
+; AVX512DQ-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm0, %xmm10
+; AVX512DQ-NEXT: vpextrq $1, %xmm10, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm11
+; AVX512DQ-NEXT: vmovq %xmm9, %rdx
+; AVX512DQ-NEXT: vmovq %xmm10, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm9
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm4, %xmm9
+; AVX512DQ-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm0, %xmm10
+; AVX512DQ-NEXT: vpextrq $1, %xmm10, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm11
+; AVX512DQ-NEXT: vmovq %xmm9, %rdx
+; AVX512DQ-NEXT: vmovq %xmm10, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm9
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vmovq %xmm4, %rdx
+; AVX512DQ-NEXT: vmovq %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm8
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm5, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm1, %xmm0
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm9
+; AVX512DQ-NEXT: vmovq %xmm4, %rdx
+; AVX512DQ-NEXT: vmovq %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm0[0],xmm9[0]
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm5, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm1, %xmm0
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vmovq %xmm4, %rdx
+; AVX512DQ-NEXT: vmovq %xmm0, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm5, %xmm0
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm1, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm10
+; AVX512DQ-NEXT: vmovq %xmm0, %rdx
+; AVX512DQ-NEXT: vmovq %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512DQ-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512DQ-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vmovq %xmm5, %rdx
+; AVX512DQ-NEXT: vmovq %xmm1, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm8, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm8
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm6, %xmm1
+; AVX512DQ-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm2, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vmovq %xmm1, %rdx
+; AVX512DQ-NEXT: vmovq %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm1
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm6, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm2, %xmm5
+; AVX512DQ-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vmovq %xmm4, %rdx
+; AVX512DQ-NEXT: vmovq %xmm5, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm4[0],xmm0[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm6, %xmm0
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm2, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vmovq %xmm0, %rdx
+; AVX512DQ-NEXT: vmovq %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX512DQ-NEXT: vpextrq $1, %xmm6, %rdx
+; AVX512DQ-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vmovq %xmm6, %rdx
+; AVX512DQ-NEXT: vmovq %xmm2, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm2
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm1
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm7, %xmm0
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $3, %zmm3, %xmm2
+; AVX512DQ-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vmovq %xmm0, %rdx
+; AVX512DQ-NEXT: vmovq %xmm2, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm7, %xmm2
+; AVX512DQ-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $2, %zmm3, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vmovq %xmm2, %rdx
+; AVX512DQ-NEXT: vmovq %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm2
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm2
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm7, %xmm0
+; AVX512DQ-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512DQ-NEXT: vextracti64x2 $1, %zmm3, %xmm4
+; AVX512DQ-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm5
+; AVX512DQ-NEXT: vmovq %xmm0, %rdx
+; AVX512DQ-NEXT: vmovq %xmm4, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm0
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX512DQ-NEXT: vpextrq $1, %xmm7, %rdx
+; AVX512DQ-NEXT: vpextrq $1, %xmm3, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: movl $0, %edx
+; AVX512DQ-NEXT: cmovgq %rcx, %rdx
+; AVX512DQ-NEXT: vmovq %rdx, %xmm4
+; AVX512DQ-NEXT: vmovq %xmm7, %rdx
+; AVX512DQ-NEXT: vmovq %xmm3, %rsi
+; AVX512DQ-NEXT: cmpq %rdx, %rsi
+; AVX512DQ-NEXT: cmovgq %rcx, %rax
+; AVX512DQ-NEXT: vmovq %rax, %xmm3
+; AVX512DQ-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512DQ-NEXT: vinserti32x8 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpmovdb %zmm0, %xmm0
+; AVX512DQ-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: test_cmp_v32i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm4, %xmm8
+; AVX512BW-NEXT: vpextrq $1, %xmm8, %rcx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm9
+; AVX512BW-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512BW-NEXT: xorl %eax, %eax
+; AVX512BW-NEXT: cmpq %rcx, %rdx
+; AVX512BW-NEXT: movq $-1, %rcx
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vmovq %xmm8, %rdx
+; AVX512BW-NEXT: vmovq %xmm9, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm8
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm4, %xmm9
+; AVX512BW-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm10
+; AVX512BW-NEXT: vpextrq $1, %xmm10, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm11
+; AVX512BW-NEXT: vmovq %xmm9, %rdx
+; AVX512BW-NEXT: vmovq %xmm10, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm9
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm4, %xmm9
+; AVX512BW-NEXT: vpextrq $1, %xmm9, %rdx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm10
+; AVX512BW-NEXT: vpextrq $1, %xmm10, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm11
+; AVX512BW-NEXT: vmovq %xmm9, %rdx
+; AVX512BW-NEXT: vmovq %xmm10, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm9
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vmovq %xmm4, %rdx
+; AVX512BW-NEXT: vmovq %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm8
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm5, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm0
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm9
+; AVX512BW-NEXT: vmovq %xmm4, %rdx
+; AVX512BW-NEXT: vmovq %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm0[0],xmm9[0]
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm5, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm0
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vmovq %xmm4, %rdx
+; AVX512BW-NEXT: vmovq %xmm0, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm9, %ymm0, %ymm9
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm5, %xmm0
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm1, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm10
+; AVX512BW-NEXT: vmovq %xmm0, %rdx
+; AVX512BW-NEXT: vmovq %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm10[0]
+; AVX512BW-NEXT: vpextrq $1, %xmm5, %rdx
+; AVX512BW-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vmovq %xmm5, %rdx
+; AVX512BW-NEXT: vmovq %xmm1, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm8
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm6, %xmm1
+; AVX512BW-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm2, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vmovq %xmm1, %rdx
+; AVX512BW-NEXT: vmovq %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm1
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm6, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rdx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm2, %xmm5
+; AVX512BW-NEXT: vpextrq $1, %xmm5, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vmovq %xmm4, %rdx
+; AVX512BW-NEXT: vmovq %xmm5, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm4[0],xmm0[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm6, %xmm0
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm2, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vmovq %xmm0, %rdx
+; AVX512BW-NEXT: vmovq %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX512BW-NEXT: vpextrq $1, %xmm6, %rdx
+; AVX512BW-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vmovq %xmm6, %rdx
+; AVX512BW-NEXT: vmovq %xmm2, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm2
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm1
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm7, %xmm0
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm3, %xmm2
+; AVX512BW-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vmovq %xmm0, %rdx
+; AVX512BW-NEXT: vmovq %xmm2, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm7, %xmm2
+; AVX512BW-NEXT: vpextrq $1, %xmm2, %rdx
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vmovq %xmm2, %rdx
+; AVX512BW-NEXT: vmovq %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm2
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm5[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm2
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm7, %xmm0
+; AVX512BW-NEXT: vpextrq $1, %xmm0, %rdx
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm3, %xmm4
+; AVX512BW-NEXT: vpextrq $1, %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm5
+; AVX512BW-NEXT: vmovq %xmm0, %rdx
+; AVX512BW-NEXT: vmovq %xmm4, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm0
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX512BW-NEXT: vpextrq $1, %xmm7, %rdx
+; AVX512BW-NEXT: vpextrq $1, %xmm3, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: movl $0, %edx
+; AVX512BW-NEXT: cmovgq %rcx, %rdx
+; AVX512BW-NEXT: vmovq %rdx, %xmm4
+; AVX512BW-NEXT: vmovq %xmm7, %rdx
+; AVX512BW-NEXT: vmovq %xmm3, %rsi
+; AVX512BW-NEXT: cmpq %rdx, %rsi
+; AVX512BW-NEXT: cmovgq %rcx, %rax
+; AVX512BW-NEXT: vmovq %rax, %xmm3
+; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm8, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i64> %a0, %a1
ret <32 x i1> %1
}
diff --git a/test/CodeGen/X86/vector-sext.ll b/test/CodeGen/X86/vector-sext.ll
index 39fbc7611de86..774d615ae8962 100644
--- a/test/CodeGen/X86/vector-sext.ll
+++ b/test/CodeGen/X86/vector-sext.ll
@@ -1244,8 +1244,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512F-NEXT: retq
;
@@ -1253,8 +1252,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512BW-NEXT: retq
;
@@ -1435,8 +1433,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512F-NEXT: retq
@@ -1445,8 +1442,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512BW-NEXT: retq
@@ -1642,8 +1638,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512F-NEXT: retq
;
@@ -1651,8 +1646,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
; AVX512BW-NEXT: retq
;
@@ -1945,8 +1939,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
; AVX512F-NEXT: retq
;
@@ -1954,8 +1947,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpmovqw %zmm0, %xmm0
; AVX512BW-NEXT: retq
;
@@ -2348,8 +2340,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: retq
;
@@ -2357,8 +2348,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
-; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: retq
;
@@ -2860,8 +2850,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; AVX512-LABEL: load_sext_16i1_to_16i8:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
@@ -3398,8 +3387,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; AVX512-LABEL: load_sext_16i1_to_16i16:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: retq
;
@@ -4244,12 +4232,11 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; AVX512: # BB#0: # %entry
; AVX512-NEXT: kmovw (%rdi), %k1
; AVX512-NEXT: kmovw 2(%rdi), %k2
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1} {z}
-; AVX512-NEXT: vpmovdb %zmm1, %xmm1
-; AVX512-NEXT: vmovdqa32 %zmm0, %zmm0 {%k2} {z}
+; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT: vpmovdb %zmm0, %xmm0
-; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512-NEXT: vpmovdb %zmm1, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_32i1_to_32i8:
diff --git a/test/CodeGen/X86/vector-shift-ashr-128.ll b/test/CodeGen/X86/vector-shift-ashr-128.ll
index 27b65b829923e..440faa689fb8a 100644
--- a/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -5,6 +5,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
@@ -321,13 +322,22 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; AVX512-LABEL: var_shift_v8i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v8i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
@@ -499,30 +509,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
;
; AVX512-LABEL: var_shift_v16i8:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
-; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpsravd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
@@ -911,30 +901,10 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-LABEL: splatvar_shift_v16i8:
; AVX512: # BB#0:
; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
-; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512-NEXT: vpsravd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
@@ -1221,13 +1191,21 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v8i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v8i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
@@ -1384,31 +1362,9 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
;
; AVX512-LABEL: constant_shift_v16i8:
; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX512-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX512-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm3
-; AVX512-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX512-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm2, %xmm4, %xmm3, %xmm2
-; AVX512-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; AVX512-NEXT: vpsraw $4, %xmm0, %xmm3
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsraw $2, %xmm0, %xmm3
-; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsraw $1, %xmm0, %xmm3
-; AVX512-NEXT: vpaddw %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX512-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
diff --git a/test/CodeGen/X86/vector-shift-ashr-256.ll b/test/CodeGen/X86/vector-shift-ashr-256.ll
index ee1879b6696e6..79902acfec24c 100644
--- a/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -3,6 +3,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
@@ -212,13 +213,21 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: var_shift_v16i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v16i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vpsravd %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
%shift = ashr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -331,33 +340,41 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: var_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512-NEXT: vpsraw $4, %ymm3, %ymm4
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
-; AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512-NEXT: vpsraw $4, %ymm0, %ymm3
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX512-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%shift = ashr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -608,34 +625,43 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: splatvar_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512-NEXT: vpsraw $4, %ymm3, %ymm4
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
-; AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512-NEXT: vpsraw $4, %ymm0, %ymm3
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX512-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -804,13 +830,20 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v16i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v16i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
%shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -913,34 +946,41 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512-NEXT: vpsraw $4, %ymm3, %ymm4
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpsraw $2, %ymm3, %ymm4
-; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpsraw $1, %ymm3, %ymm4
-; AVX512-NEXT: vpaddw %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
-; AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; AVX512-NEXT: vpsraw $4, %ymm0, %ymm3
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsraw $2, %ymm0, %ymm3
-; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsraw $1, %ymm0, %ymm3
-; AVX512-NEXT: vpaddw %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $8, %ymm0, %ymm0
-; AVX512-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512DQ-NEXT: vpsraw $4, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $2, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpsraw $1, %ymm3, %ymm4
+; AVX512DQ-NEXT: vpaddw %ymm2, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm3, %ymm2
+; AVX512DQ-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512DQ-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512DQ-NEXT: vpsraw $4, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $2, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsraw $1, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpaddw %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm3, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm0
+; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
diff --git a/test/CodeGen/X86/vector-shift-ashr-512.ll b/test/CodeGen/X86/vector-shift-ashr-512.ll
index 1280641c557b9..2c9e433cfb2ca 100644
--- a/test/CodeGen/X86/vector-shift-ashr-512.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-512.ll
@@ -26,25 +26,14 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
-; AVX512DQ-NEXT: vpsravd %ymm5, %ymm6, %ymm5
-; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
-; AVX512DQ-NEXT: vpsravd %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
-; AVX512DQ-NEXT: vpsravd %ymm2, %ymm5, %ymm2
-; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
-; AVX512DQ-NEXT: vpsravd %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vpsravd %zmm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512DQ-NEXT: vpsravd %zmm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovdw %zmm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i16:
@@ -1025,24 +1014,13 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
-; AVX512DQ-NEXT: vpsravd %ymm4, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512DQ-NEXT: vpsravd %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; AVX512DQ-NEXT: vpsravd %ymm4, %ymm5, %ymm4
-; AVX512DQ-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512DQ-NEXT: vpsravd %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackusdw %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVX512DQ-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpsravd %zmm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovsxwd %ymm1, %zmm1
+; AVX512DQ-NEXT: vpsravd %zmm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovdw %zmm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
diff --git a/test/CodeGen/X86/vector-shift-lshr-128.ll b/test/CodeGen/X86/vector-shift-lshr-128.ll
index 42488f2ec3a75..a7e1a531b6598 100644
--- a/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -5,6 +5,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
@@ -290,13 +291,22 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; AVX512-LABEL: var_shift_v8i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v8i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
@@ -417,18 +427,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
;
; AVX512-LABEL: var_shift_v16i8:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
@@ -701,18 +703,10 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-LABEL: splatvar_shift_v16i8:
; AVX512: # BB#0:
; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
@@ -955,13 +949,21 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v8i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v8i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
@@ -1064,19 +1066,9 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
;
; AVX512-LABEL: constant_shift_v16i8:
; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $2, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsrlw $1, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
diff --git a/test/CodeGen/X86/vector-shift-lshr-256.ll b/test/CodeGen/X86/vector-shift-lshr-256.ll
index 5223d7bba353d..25667e7d1661a 100644
--- a/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -3,6 +3,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
@@ -189,13 +190,21 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: var_shift_v16i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v16i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -275,21 +284,29 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: var_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%shift = lshr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -490,22 +507,31 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: splatvar_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -659,13 +685,20 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v16i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v16i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
%shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -739,22 +772,29 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $2, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsrlw $1, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
diff --git a/test/CodeGen/X86/vector-shift-lshr-512.ll b/test/CodeGen/X86/vector-shift-lshr-512.ll
index 4c3caf329fb75..3da8f9437e575 100644
--- a/test/CodeGen/X86/vector-shift-lshr-512.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-512.ll
@@ -27,25 +27,14 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
-; AVX512DQ-NEXT: vpsrlvd %ymm5, %ymm6, %ymm5
-; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
-; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
-; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm5, %ymm2
-; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
-; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512DQ-NEXT: vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovdw %zmm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i16:
@@ -988,24 +977,13 @@ define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: constant_shift_v32i16:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
-; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
-; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm4
-; AVX512DQ-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackusdw %ymm4, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512DQ-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512DQ-NEXT: vpsrlvd %zmm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovdw %zmm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
diff --git a/test/CodeGen/X86/vector-shift-shl-128.ll b/test/CodeGen/X86/vector-shift-shl-128.ll
index 5c89949e924b1..8706078b40c99 100644
--- a/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -5,6 +5,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
@@ -245,13 +246,22 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
; XOP-NEXT: retq
;
-; AVX512-LABEL: var_shift_v8i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v8i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX512DQ-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
@@ -367,17 +377,10 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
;
; AVX512-LABEL: var_shift_v16i8:
; AVX512: # BB#0:
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsllw $2, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: var_shift_v16i8:
@@ -642,17 +645,10 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512-LABEL: splatvar_shift_v16i8:
; AVX512: # BB#0:
; AVX512-NEXT: vpbroadcastb %xmm1, %xmm1
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsllw $2, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
+; AVX512-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: splatvar_shift_v16i8:
@@ -827,13 +823,18 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v8i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
-; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v8i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
@@ -919,18 +920,9 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
;
; AVX512-LABEL: constant_shift_v16i8:
; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
-; AVX512-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpsllw $2, %xmm0, %xmm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX512-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX512-NEXT: vpaddb %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
+; AVX512-NEXT: vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, %xmm0
; AVX512-NEXT: retq
;
; X32-SSE-LABEL: constant_shift_v16i8:
diff --git a/test/CodeGen/X86/vector-shift-shl-256.ll b/test/CodeGen/X86/vector-shift-shl-256.ll
index eb52ae3ccaca0..a1ef2791c1b03 100644
--- a/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -3,6 +3,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
@@ -164,13 +165,21 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: var_shift_v16i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v16i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -240,20 +249,28 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: var_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: var_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: var_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%shift = shl <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -446,21 +463,30 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: splatvar_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastb %xmm1, %ymm1
-; AVX512-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: splatvar_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: splatvar_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -571,13 +597,18 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; XOPAVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v16i16:
-; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX512-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v16i16:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
+; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: retq
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -645,21 +676,28 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
;
-; AVX512-LABEL: constant_shift_v32i8:
-; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
-; AVX512-NEXT: vpsllw $5, %ymm1, %ymm1
-; AVX512-NEXT: vpsllw $4, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpsllw $2, %ymm0, %ymm2
-; AVX512-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512-NEXT: vpaddb %ymm1, %ymm1, %ymm1
-; AVX512-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512-NEXT: retq
+; AVX512DQ-LABEL: constant_shift_v32i8:
+; AVX512DQ: # BB#0:
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; AVX512DQ-NEXT: vpsllw $5, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpsllw $4, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpsllw $2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddb %ymm0, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpaddb %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; AVX512BW-LABEL: constant_shift_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
+; AVX512BW-NEXT: retq
%shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
diff --git a/test/CodeGen/X86/vector-shift-shl-512.ll b/test/CodeGen/X86/vector-shift-shl-512.ll
index 520c3237a57f7..b9c9b56427f18 100644
--- a/test/CodeGen/X86/vector-shift-shl-512.ll
+++ b/test/CodeGen/X86/vector-shift-shl-512.ll
@@ -27,25 +27,14 @@ define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512DQ-LABEL: var_shift_v32i16:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
-; AVX512DQ-NEXT: vpsllvd %ymm5, %ymm6, %ymm5
-; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
-; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
-; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm5, %ymm2
-; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
-; AVX512DQ-NEXT: vpsllvd %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512DQ-NEXT: vpsllvd %zmm2, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero
+; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512DQ-NEXT: vpsllvd %zmm2, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpmovdw %zmm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i16:
diff --git a/test/CodeGen/X86/vector-shuffle-512-v64.ll b/test/CodeGen/X86/vector-shuffle-512-v64.ll
index 2836d69a0fec8..f4650ec741a71 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v64.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v64.ll
@@ -178,13 +178,8 @@ define <64 x i8> @shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_
;
; AVX512BW-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm0, %ymm2
-; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512BW-NEXT: vpshufb %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
-; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48]
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[6,7,4,5,2,3,0,1]
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuffle_v64i8_63_62_61_60_59_58_57_56_55_54_53_52_51_50_49_48_47_46_45_44_43_42_41_40_39_38_37_36_35_34_33_32_31_30_29_28_27_26_25_24_23_22_21_20_19_18_17_16_15_14_13_12_11_10_09_08_07_06_05_04_03_02_01_00:
diff --git a/test/CodeGen/X86/vector-shuffle-masked.ll b/test/CodeGen/X86/vector-shuffle-masked.ll
index 04d6b37332466..37fd022999e42 100644
--- a/test/CodeGen/X86/vector-shuffle-masked.ll
+++ b/test/CodeGen/X86/vector-shuffle-masked.ll
@@ -216,7 +216,8 @@ define <8 x i32> @mask_shuffle_v8i32_23456701(<8 x i32> %a, <8 x i32> %passthru,
; CHECK: # BB#0:
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[1,2,3,0]
; CHECK-NEXT: kmovb %edi, %k1
-; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm1 {%k1}
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
%mask.cast = bitcast i8 %mask to <8 x i1>
@@ -686,3 +687,33 @@ define <2 x double> @mask_cast_extract_v16f32_v2f64_1(<16 x float> %a, <2 x doub
%res = select <2 x i1> %mask.extract, <2 x double> %shuffle.cast, <2 x double> %passthru
ret <2 x double> %res
}
+
+define <2 x double> @broadcast_v4f32_0101_from_v2f32_mask(double* %x, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_mask:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
+; CHECK-NEXT: retq
+ %q = load double, double* %x, align 1
+ %vecinit.i = insertelement <2 x double> undef, double %q, i32 0
+ %vecinit2.i = insertelement <2 x double> %vecinit.i, double %q, i32 1
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %res = select <2 x i1> %mask.extract, <2 x double> %vecinit2.i, <2 x double> %passthru
+ ret <2 x double> %res
+}
+
+define <2 x double> @broadcast_v4f32_0101_from_v2f32_maskz(double* %x, i8 %mask) {
+; CHECK-LABEL: broadcast_v4f32_0101_from_v2f32_maskz:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovb %esi, %k1
+; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0]
+; CHECK-NEXT: retq
+ %q = load double, double* %x, align 1
+ %vecinit.i = insertelement <2 x double> undef, double %q, i32 0
+ %vecinit2.i = insertelement <2 x double> %vecinit.i, double %q, i32 1
+ %mask.cast = bitcast i8 %mask to <8 x i1>
+ %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+ %res = select <2 x i1> %mask.extract, <2 x double> %vecinit2.i, <2 x double> zeroinitializer
+ ret <2 x double> %res
+}
diff --git a/test/CodeGen/X86/vector-shuffle-v1.ll b/test/CodeGen/X86/vector-shuffle-v1.ll
index 3ad92737a2efb..4312b67546d29 100644
--- a/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -71,13 +71,12 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
; AVX512F-LABEL: shuf8i1_3_6_1_0_3_7_7_0:
; AVX512F: # BB#0:
; AVX512F-NEXT: vpcmpeqq %zmm2, %zmm0, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z}
-; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [3,6,1,0,3,7,7,0]
-; AVX512F-NEXT: vpermq %zmm1, %zmm2, %zmm1
-; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1
-; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,6,1,0,3,7,7,0]
+; AVX512F-NEXT: vpermq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
; AVX512F-NEXT: retq
;
@@ -101,14 +100,13 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<16 x i32> %a, <1
; AVX512F: # BB#0:
; AVX512F-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; AVX512F-NEXT: vpcmpeqd %zmm3, %zmm1, %k2
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm1 {%k2} {z}
-; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm2 {%k1} {z}
-; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
-; AVX512F-NEXT: vpermi2d %zmm1, %zmm2, %zmm3
-; AVX512F-NEXT: vpslld $31, %zmm3, %zmm1
-; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1
-; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512F-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,6,22,12,3,7,7,0,3,6,1,13,3,21,7,0]
+; AVX512F-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT: vpslld $31, %zmm2, %zmm0
+; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: retq
;
@@ -157,13 +155,12 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
; AVX512F-LABEL: shuf8i1_u_2_u_u_2_u_2_u:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z}
-; AVX512F-NEXT: vextracti32x4 $1, %zmm1, %xmm1
-; AVX512F-NEXT: vpbroadcastq %xmm1, %zmm1
-; AVX512F-NEXT: vpsllq $63, %zmm1, %zmm1
-; AVX512F-NEXT: vptestmq %zmm1, %zmm1, %k1
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm0
+; AVX512F-NEXT: vpbroadcastq %xmm0, %zmm0
+; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
+; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqw %zmm0, %xmm0
; AVX512F-NEXT: retq
;
@@ -185,8 +182,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; AVX512F-LABEL: shuf8i1_10_2_9_u_3_u_2_u:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = <8,2,10,u,3,u,2,u>
; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
@@ -215,8 +211,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; AVX512F-LABEL: shuf8i1_0_1_4_5_u_u_u_u:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
@@ -241,8 +236,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; AVX512F-LABEL: shuf8i1_9_6_1_0_3_7_7_0:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [8,6,1,0,3,7,7,0]
; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
@@ -271,8 +265,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; AVX512F-LABEL: shuf8i1_9_6_1_10_3_7_7_0:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,10,4,5,6,7]
; AVX512F-NEXT: vpxord %zmm2, %zmm2, %zmm2
; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
@@ -301,13 +294,12 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; AVX512F-LABEL: shuf8i1__9_6_1_10_3_7_7_1:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; AVX512F-NEXT: movb $51, %al
; AVX512F-NEXT: kmovw %eax, %k2
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k2} {z}
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; AVX512F-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,6,1,0,3,7,7,1]
-; AVX512F-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
@@ -337,10 +329,10 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa64 %zmm0, %zmm1 {%k1} {z}
-; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm2 = [9,1,2,3,4,5,6,7]
-; AVX512F-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vmovdqa64 {{.*#+}} zmm1 = [9,1,2,3,4,5,6,7]
+; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512F-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
@@ -367,8 +359,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; AVX512F-LABEL: shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0:
; AVX512F: # BB#0:
; AVX512F-NEXT: kmovw %edi, %k1
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpbroadcastd %xmm0, %zmm0
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
@@ -403,9 +394,8 @@ define i64 @shuf64i1_zero(i64 %a) {
; AVX512F-NEXT: andq $-32, %rsp
; AVX512F-NEXT: subq $96, %rsp
; AVX512F-NEXT: movl %edi, {{[0-9]+}}(%rsp)
-; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
; AVX512F-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
-; AVX512F-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpbroadcastb %xmm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
diff --git a/test/ExecutionEngine/Interpreter/lit.local.cfg b/test/ExecutionEngine/Interpreter/lit.local.cfg
index 8cbaf03217d59..231d8e22cc6f3 100644
--- a/test/ExecutionEngine/Interpreter/lit.local.cfg
+++ b/test/ExecutionEngine/Interpreter/lit.local.cfg
@@ -1,3 +1,3 @@
# These tests require foreign function calls
-if config.enable_ffi != "ON":
+if not config.enable_ffi:
config.unsupported = True
diff --git a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s
index 3ba95e4d394bc..a9ec009395043 100644
--- a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s
+++ b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_BE-relocations.s
@@ -1,6 +1,11 @@
# RUN: llvm-mc -triple=aarch64_be-none-linux-gnu -filetype=obj -o %T/be-reloc.o %s
# RUN: llvm-rtdyld -triple=aarch64_be-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/be-reloc.o
+ .globl Q
+ .section .dummy, "ax"
+Q:
+ nop
+
.text
.globl g
.p2align 2
@@ -23,8 +28,11 @@ g:
.globl k
.p2align 3
k:
- .xword f
+ .xword f
.size k, 8
+r:
+# R_AARCH64_PREL32: use Q instead of f to fit in 32 bits.
+ .word Q - .
# LE instructions read as BE
# rtdyld-check: *{4}(g) = 0x6024e0d2
@@ -32,3 +40,4 @@ k:
# rtdyld-check: *{4}(g + 8) = 0x6035b1f2
# rtdyld-check: *{4}(g + 12) = 0xe0bd99f2
# rtdyld-check: *{8}k = f
+# rtdyld-check: *{4}r = (Q - r)[31:0]
diff --git a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_local_branch.s b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_local_branch.s
new file mode 100644
index 0000000000000..679930a14e06f
--- /dev/null
+++ b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_local_branch.s
@@ -0,0 +1,14 @@
+# RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o %T/branch.o %s
+# RUN: llvm-rtdyld -triple=arm64-none-linux-gnu -verify -check=%s %T/branch.o
+
+.globl _main
+.weak _label1
+
+.section .text.1,"ax"
+_label1:
+ nop
+_main:
+ b _label1
+
+## Branch 1 instruction back from _main
+# rtdyld-check: *{4}(_main) = 0x17ffffff
diff --git a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s
index c57234a906e37..f9a03ab40667c 100644
--- a/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s
+++ b/test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s
@@ -1,6 +1,11 @@
# RUN: llvm-mc -triple=arm64-none-linux-gnu -filetype=obj -o %T/reloc.o %s
# RUN: llvm-rtdyld -triple=arm64-none-linux-gnu -verify -dummy-extern f=0x0123456789abcdef -check=%s %T/reloc.o
-
+
+ .globl Q
+ .section .dummy, "ax"
+Q:
+ nop
+
.text
.globl g
.p2align 2
@@ -14,6 +19,18 @@ g:
movk x0, #:abs_g1_nc:f
# R_AARCH64_MOVW_UABS_G0_NC
movk x0, #:abs_g0_nc:f
+l:
+# R_AARCH64_LDST32_ABS_LO12_NC
+ ldr s4, [x5, :lo12:a]
+# R_AARCH64_LDST64_ABS_LO12_NC
+ ldr x4, [x5, :lo12:a]
+p:
+# R_AARCH64_ADR_PREL_PG_HI21
+# Test both low and high immediate values
+ adrp x4, a + 20480 # 16384 + 4096
+# Align next label to 16 bytes, so that LDST immediate
+# fields will be non-zero
+ .align 4
a:
# R_AARCH64_ADD_ABS_LO12_NC
add x0, x0, :lo12:f
@@ -27,13 +44,27 @@ a:
.p2align 3
k:
.xword f
- .size k, 8
+ .size k, 16
+r:
+# R_AARCH64_PREL32: use Q instead of f to fit in 32 bits.
+ .word Q - .
# rtdyld-check: *{4}(g) = 0xd2e02460
# rtdyld-check: *{4}(g + 4) = 0xf2c8ace0
# rtdyld-check: *{4}(g + 8) = 0xf2b13560
# rtdyld-check: *{4}(g + 12) = 0xf299bde0
+
+## Check LDST32_ABS_LO12_NC and LDST64_ABS_LO12_NC
+# rtdyld-check: (*{4}l)[21:10] = a[11:2]
+# rtdyld-check: (*{4}(l+4))[21:10] = a[11:3]
+
+## Check ADR_PREL_PG_HI21. Low order bits of immediate value
+## go to bits 30:29. High order bits go to bits 23:5
+# rtdyld-check: (*{4}p)[30:29] = (a - p + 20480)[13:12]
+# rtdyld-check: (*{4}p)[23:5] = (a - p + 20480)[32:14]
+
# rtdyld-check: *{8}k = f
+# rtdyld-check: *{4}r = (Q - r)[31:0]
## f & 0xFFF = 0xdef (bits 11:0 of f)
## 0xdef << 10 = 0x37bc00
diff --git a/test/Instrumentation/AddressSanitizer/global_metadata_darwin.ll b/test/Instrumentation/AddressSanitizer/global_metadata_darwin.ll
index 9b0c1ef9b5e07..af4da14d786f7 100644
--- a/test/Instrumentation/AddressSanitizer/global_metadata_darwin.ll
+++ b/test/Instrumentation/AddressSanitizer/global_metadata_darwin.ll
@@ -22,7 +22,7 @@ target triple = "x86_64-apple-macosx10.11.0"
; CHECK: @__asan_binder_global = internal global {{.*}} @global {{.*}} [[METADATA]] {{.*}} section "__DATA,__asan_liveness,regular,live_support"
; Test that there is the flag global variable:
-; CHECK: @__asan_globals_registered = common global i64 0
+; CHECK: @__asan_globals_registered = common hidden global i64 0
; The binder has to be inserted to llvm.compiler.used to avoid being stripped
; during LTO.
diff --git a/test/JitListener/lit.local.cfg b/test/JitListener/lit.local.cfg
index 05f34a744ad69..f485229b01c2d 100644
--- a/test/JitListener/lit.local.cfg
+++ b/test/JitListener/lit.local.cfg
@@ -1,3 +1,3 @@
-if not config.root.llvm_use_intel_jitevents == "true":
+if not config.root.llvm_use_intel_jitevents:
config.unsupported = True
diff --git a/test/ThinLTO/X86/Inputs/funcimport-tbaa.ll b/test/ThinLTO/X86/Inputs/funcimport-tbaa.ll
new file mode 100644
index 0000000000000..72aea1e5e2522
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/funcimport-tbaa.ll
@@ -0,0 +1,11 @@
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.11.0"
+
+
+define i32 @main() {
+entry:
+ %unused = call float @globalfunc1(i32* null, float*null)
+ ret i32 0
+}
+
+declare float @globalfunc1(i32*, float*) \ No newline at end of file
diff --git a/test/ThinLTO/X86/Inputs/local_name_conflict1.ll b/test/ThinLTO/X86/Inputs/local_name_conflict1.ll
new file mode 100644
index 0000000000000..2ef7bdd3eb7be
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/local_name_conflict1.ll
@@ -0,0 +1,17 @@
+; ModuleID = 'local_name_conflict.o'
+source_filename = "local_name_conflict.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind uwtable
+define i32 @a() {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+}
+
+; Function Attrs: noinline nounwind uwtable
+define internal i32 @foo() {
+entry:
+ ret i32 1
+}
diff --git a/test/ThinLTO/X86/Inputs/local_name_conflict2.ll b/test/ThinLTO/X86/Inputs/local_name_conflict2.ll
new file mode 100644
index 0000000000000..a8c20a29228a6
--- /dev/null
+++ b/test/ThinLTO/X86/Inputs/local_name_conflict2.ll
@@ -0,0 +1,17 @@
+; ModuleID = 'local_name_conflict.o'
+source_filename = "local_name_conflict.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind uwtable
+define i32 @b() {
+entry:
+ %call = call i32 @foo()
+ ret i32 %call
+}
+
+; Function Attrs: noinline nounwind uwtable
+define internal i32 @foo() {
+entry:
+ ret i32 2
+}
diff --git a/test/ThinLTO/X86/funcimport-tbaa.ll b/test/ThinLTO/X86/funcimport-tbaa.ll
new file mode 100644
index 0000000000000..c3dfd7d90b001
--- /dev/null
+++ b/test/ThinLTO/X86/funcimport-tbaa.ll
@@ -0,0 +1,38 @@
+; We generate invalid TBAA, hence -disable-verify, but this is a convenient way
+; to trigger a metadata lazy-loading crash.
+
+; RUN: opt -module-summary %s -o %t.bc -bitcode-mdindex-threshold=0 -disable-verify
+; RUN: opt -module-summary %p/Inputs/funcimport-tbaa.ll -o %t2.bc
+; RUN: llvm-lto -thinlto-action=thinlink -o %t3.bc %t.bc %t2.bc
+
+
+; RUN: llvm-lto -thinlto-action=import %t2.bc -thinlto-index=%t3.bc -o - \
+; RUN: | llvm-dis -o - | FileCheck %s --check-prefix=IMPORTGLOB1
+; IMPORTGLOB1: define available_externally float @globalfunc1
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx10.11.0"
+
+define float @globalfunc1(i32*, float*) {
+ %3 = load i32, i32* %0, align 4, !tbaa !0
+ %4 = sitofp i32 %3 to float
+ %5 = load float, float* %1, align 4, !tbaa !4
+ %6 = fadd float %4, %5
+ ret float %6
+}
+
+; We need a second function to force the metadata to be emitted in the global block
+define float @globalfunc2(i32*, float*) {
+ %3 = load i32, i32* %0, align 4, !tbaa !0
+ %4 = sitofp i32 %3 to float
+ %5 = load float, float* %1, align 4, !tbaa !4
+ %6 = fadd float %4, %5
+ ret float %6
+}
+
+!0 = !{!1, !4, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"float", !2, i64 0}
diff --git a/test/ThinLTO/X86/local_name_conflict.ll b/test/ThinLTO/X86/local_name_conflict.ll
new file mode 100644
index 0000000000000..9cbb32ecf2111
--- /dev/null
+++ b/test/ThinLTO/X86/local_name_conflict.ll
@@ -0,0 +1,29 @@
+; Do setup work for all below tests: generate bitcode and combined index
+; RUN: opt -module-summary -module-hash %s -o %t.bc
+; RUN: opt -module-summary -module-hash %p/Inputs/local_name_conflict1.ll -o %t2.bc
+; RUN: opt -module-summary -module-hash %p/Inputs/local_name_conflict2.ll -o %t3.bc
+; RUN: llvm-lto -thinlto-action=thinlink -o %t4.bc %t.bc %t2.bc %t3.bc
+
+; Make sure foo is promoted and renamed without complaint in both
+; Inputs/local_name_conflict1.ll and Inputs/local_name_conflict2.ll
+; FIXME: Once the importer is fixed to import the correct copy of the
+; local, we should be able to verify that via an import action.
+; RUN: llvm-lto -thinlto-action=promote %t2.bc -thinlto-index=%t4.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=EXPORTSTATIC
+; RUN: llvm-lto -thinlto-action=promote %t3.bc -thinlto-index=%t4.bc -o - | llvm-dis -o - | FileCheck %s --check-prefix=EXPORTSTATIC
+; EXPORTSTATIC: define hidden i32 @foo.llvm.
+
+; ModuleID = 'local_name_conflict_main.o'
+source_filename = "local_name_conflict_main.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind uwtable
+define i32 @main() {
+entry:
+ %retval = alloca i32, align 4
+ store i32 0, i32* %retval, align 4
+ %call = call i32 (...) @b()
+ ret i32 %call
+}
+
+declare i32 @b(...)
diff --git a/test/Transforms/GVN/invariant.group.ll b/test/Transforms/GVN/invariant.group.ll
index 026671a5bdf48..d0b32d7f3dd8c 100644
--- a/test/Transforms/GVN/invariant.group.ll
+++ b/test/Transforms/GVN/invariant.group.ll
@@ -344,11 +344,63 @@ _Z1gR1A.exit: ; preds = %0, %5
ret void
}
+; Check that no optimizations are performed with global pointers.
+; FIXME: we could do the optimizations if we checked whether the dependency
+; comes from the same function.
+; CHECK-LABEL: define void @testGlobal() {
+define void @testGlobal() {
+; CHECK: %a = load i8, i8* @unknownPtr, !invariant.group !0
+ %a = load i8, i8* @unknownPtr, !invariant.group !0
+ call void @foo2(i8* @unknownPtr, i8 %a)
+; CHECK: %1 = load i8, i8* @unknownPtr, !invariant.group !0
+ %1 = load i8, i8* @unknownPtr, !invariant.group !0
+ call void @bar(i8 %1)
+
+ %b0 = bitcast i8* @unknownPtr to i1*
+ call void @fooBit(i1* %b0, i1 1)
+; Adding regex because of canonicalization of bitcasts
+; CHECK: %2 = load i1, i1* {{.*}}, !invariant.group !0
+ %2 = load i1, i1* %b0, !invariant.group !0
+ call void @fooBit(i1* %b0, i1 %2)
+; CHECK: %3 = load i1, i1* {{.*}}, !invariant.group !0
+ %3 = load i1, i1* %b0, !invariant.group !0
+ call void @fooBit(i1* %b0, i1 %3)
+ ret void
+}
+; And in the case where the pointer is not global
+; CHECK-LABEL: define void @testNotGlobal() {
+define void @testNotGlobal() {
+ %a = alloca i8
+ call void @foo(i8* %a)
+; CHECK: %b = load i8, i8* %a, !invariant.group !0
+ %b = load i8, i8* %a, !invariant.group !0
+ call void @foo2(i8* %a, i8 %b)
+
+ %1 = load i8, i8* %a, !invariant.group !0
+; CHECK: call void @bar(i8 %b)
+ call void @bar(i8 %1)
+
+ %b0 = bitcast i8* %a to i1*
+ call void @fooBit(i1* %b0, i1 1)
+; CHECK: %trunc = trunc i8 %b to i1
+ %2 = load i1, i1* %b0, !invariant.group !0
+; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %trunc)
+ call void @fooBit(i1* %b0, i1 %2)
+ %3 = load i1, i1* %b0, !invariant.group !0
+; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %trunc)
+ call void @fooBit(i1* %b0, i1 %3)
+ ret void
+}
+
+
declare void @foo(i8*)
+declare void @foo2(i8*, i8)
declare void @bar(i8)
declare i8* @getPointer(i8*)
declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*)
+declare void @fooBit(i1*, i1)
+
declare i8* @llvm.invariant.group.barrier(i8*)
; Function Attrs: nounwind
diff --git a/test/Transforms/InstCombine/assume.ll b/test/Transforms/InstCombine/assume.ll
index 7987aa2423199..6e690426db99f 100644
--- a/test/Transforms/InstCombine/assume.ll
+++ b/test/Transforms/InstCombine/assume.ll
@@ -2,7 +2,6 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; Function Attrs: nounwind uwtable
define i32 @foo1(i32* %a) #0 {
entry:
%0 = load i32, i32* %a, align 4
@@ -22,7 +21,6 @@ entry:
ret i32 %0
}
-; Function Attrs: nounwind uwtable
define i32 @foo2(i32* %a) #0 {
entry:
; Same check as in @foo1, but make sure it works if the assume is first too.
@@ -40,7 +38,6 @@ entry:
ret i32 %0
}
-; Function Attrs: nounwind
declare void @llvm.assume(i1) #1
define i32 @simple(i32 %a) #1 {
@@ -55,7 +52,6 @@ entry:
ret i32 %a
}
-; Function Attrs: nounwind uwtable
define i32 @can1(i1 %a, i1 %b, i1 %c) {
entry:
%and1 = and i1 %a, %b
@@ -71,7 +67,6 @@ entry:
ret i32 5
}
-; Function Attrs: nounwind uwtable
define i32 @can2(i1 %a, i1 %b, i1 %c) {
entry:
%v = or i1 %a, %b
@@ -103,7 +98,6 @@ entry:
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @bar2(i32 %a) #0 {
entry:
; CHECK-LABEL: @bar2
@@ -118,7 +112,6 @@ entry:
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @bar3(i32 %a, i1 %x, i1 %y) #0 {
entry:
%and1 = and i32 %a, 3
@@ -139,7 +132,6 @@ entry:
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @bar4(i32 %a, i32 %b) {
entry:
%and1 = and i32 %b, 3
@@ -160,30 +152,41 @@ entry:
}
define i32 @icmp1(i32 %a) #0 {
-entry:
+; CHECK-LABEL: @icmp1(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 1
+;
%cmp = icmp sgt i32 %a, 5
tail call void @llvm.assume(i1 %cmp)
%conv = zext i1 %cmp to i32
ret i32 %conv
-
-; CHECK-LABEL: @icmp1
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 1
-
}
-; Function Attrs: nounwind uwtable
define i32 @icmp2(i32 %a) #0 {
-entry:
+; CHECK-LABEL: @icmp2(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 0
+;
%cmp = icmp sgt i32 %a, 5
tail call void @llvm.assume(i1 %cmp)
- %0 = zext i1 %cmp to i32
- %lnot.ext = xor i32 %0, 1
+ %t0 = zext i1 %cmp to i32
+ %lnot.ext = xor i32 %t0, 1
ret i32 %lnot.ext
+}
-; CHECK-LABEL: @icmp2
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 0
+; FIXME: If the 'not' of a condition is known true, then the condition must be false.
+
+define i1 @assume_not(i1 %cond) {
+; CHECK-LABEL: @assume_not(
+; CHECK-NEXT: [[NOTCOND:%.*]] = xor i1 [[COND:%.*]], true
+; CHECK-NEXT: call void @llvm.assume(i1 [[NOTCOND]])
+; CHECK-NEXT: ret i1 [[COND]]
+;
+ %notcond = xor i1 %cond, true
+ call void @llvm.assume(i1 %notcond)
+ ret i1 %cond
}
declare void @escape(i32* %a)
diff --git a/test/Transforms/InstCombine/assume2.ll b/test/Transforms/InstCombine/assume2.ll
index c41bbaa04eb73..e8fbc049f41aa 100644
--- a/test/Transforms/InstCombine/assume2.ll
+++ b/test/Transforms/InstCombine/assume2.ll
@@ -1,170 +1,155 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; Function Attrs: nounwind
declare void @llvm.assume(i1) #1
-; Function Attrs: nounwind uwtable
define i32 @test1(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test1
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 5
-
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 5
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 5
+;
%and = and i32 %a, 15
%cmp = icmp eq i32 %and, 5
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 7
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test2(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test2
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 2
-
+; CHECK-LABEL: @test2(
+; CHECK-NEXT: [[A_NOT:%.*]] = or i32 [[A:%.*]], -16
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A_NOT]], -6
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 2
+;
%and = and i32 %a, 15
%nand = xor i32 %and, -1
%cmp = icmp eq i32 %nand, 4294967285
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 7
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test3(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test3
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 5
-
+; CHECK-LABEL: @test3(
+; CHECK-NEXT: [[V:%.*]] = or i32 [[A:%.*]], -16
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V]], -11
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 5
+;
%v = or i32 %a, 4294967280
%cmp = icmp eq i32 %v, 4294967285
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 7
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test4(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test4
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 2
-
+; CHECK-LABEL: @test4(
+; CHECK-NEXT: [[A_NOT:%.*]] = and i32 [[A:%.*]], 15
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A_NOT]], 10
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 2
+;
%v = or i32 %a, 4294967280
%nv = xor i32 %v, -1
%cmp = icmp eq i32 %nv, 5
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 7
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test5(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test5
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 4
-
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 4
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 4
+;
%v = xor i32 %a, 1
%cmp = icmp eq i32 %v, 5
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 7
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test6(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test6
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 5
-
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: [[V_MASK:%.*]] = and i32 [[A:%.*]], 1073741823
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V_MASK]], 5
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 5
+;
%v = shl i32 %a, 2
%cmp = icmp eq i32 %v, 20
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 63
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test7(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test7
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 20
-
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: [[V_MASK:%.*]] = and i32 [[A:%.*]], -4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V_MASK]], 20
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 20
+;
%v = lshr i32 %a, 2
%cmp = icmp eq i32 %v, 5
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 252
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test8(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test8
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 20
-
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: [[V_MASK:%.*]] = and i32 [[A:%.*]], -4
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[V_MASK]], 20
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 20
+;
%v = lshr i32 %a, 2
%cmp = icmp eq i32 %v, 5
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 252
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test9(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test9
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 0
-
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 5
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 0
+;
%cmp = icmp sgt i32 %a, 5
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 2147483648
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test10(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test10
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 -2147483648
-
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A:%.*]], -1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 -2147483648
+;
%cmp = icmp sle i32 %a, -2
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 2147483648
ret i32 %and1
}
-; Function Attrs: nounwind uwtable
define i32 @test11(i32 %a) #0 {
-entry:
-; CHECK-LABEL: @test11
-; CHECK: call void @llvm.assume
-; CHECK: ret i32 0
-
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[A:%.*]], 257
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: ret i32 0
+;
%cmp = icmp ule i32 %a, 256
tail call void @llvm.assume(i1 %cmp)
-
%and1 = and i32 %a, 3072
ret i32 %and1
}
diff --git a/test/Transforms/InstCombine/fabs.ll b/test/Transforms/InstCombine/fabs.ll
index 09bea5895aaff..6b5f5a9495301 100644
--- a/test/Transforms/InstCombine/fabs.ll
+++ b/test/Transforms/InstCombine/fabs.ll
@@ -13,7 +13,8 @@ define float @square_fabs_call_f32(float %x) {
; CHECK-LABEL: square_fabs_call_f32(
; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: ret float %mul
+; CHECK-NEXT: %fabsf = tail call float @fabsf(float %mul)
+; CHECK-NEXT: ret float %fabsf
}
define double @square_fabs_call_f64(double %x) {
@@ -23,7 +24,8 @@ define double @square_fabs_call_f64(double %x) {
; CHECK-LABEL: square_fabs_call_f64(
; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = tail call double @fabs(double %mul)
+; CHECK-NEXT: ret double %fabs
}
define fp128 @square_fabs_call_f128(fp128 %x) {
@@ -33,15 +35,18 @@ define fp128 @square_fabs_call_f128(fp128 %x) {
; CHECK-LABEL: square_fabs_call_f128(
; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: ret fp128 %mul
+; CHECK-NEXT: %fabsl = tail call fp128 @fabsl(fp128 %mul)
+; CHECK-NEXT: ret fp128 %fabsl
}
-; Make sure all intrinsic calls are eliminated when the input is known positive.
+; Make sure all intrinsic calls are eliminated when the input is known
+; positive.
declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
declare fp128 @llvm.fabs.f128(fp128)
+; The fabs cannot be eliminated because %x may be a NaN
define float @square_fabs_intrinsic_f32(float %x) {
%mul = fmul float %x, %x
%fabsf = tail call float @llvm.fabs.f32(float %mul)
@@ -49,7 +54,8 @@ define float @square_fabs_intrinsic_f32(float %x) {
; CHECK-LABEL: square_fabs_intrinsic_f32(
; CHECK-NEXT: %mul = fmul float %x, %x
-; CHECK-NEXT: ret float %mul
+; CHECK-NEXT: %fabsf = tail call float @llvm.fabs.f32(float %mul)
+; CHECK-NEXT: ret float %fabsf
}
define double @square_fabs_intrinsic_f64(double %x) {
@@ -59,7 +65,8 @@ define double @square_fabs_intrinsic_f64(double %x) {
; CHECK-LABEL: square_fabs_intrinsic_f64(
; CHECK-NEXT: %mul = fmul double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = tail call double @llvm.fabs.f64(double %mul)
+; CHECK-NEXT: ret double %fabs
}
define fp128 @square_fabs_intrinsic_f128(fp128 %x) {
@@ -69,7 +76,20 @@ define fp128 @square_fabs_intrinsic_f128(fp128 %x) {
; CHECK-LABEL: square_fabs_intrinsic_f128(
; CHECK-NEXT: %mul = fmul fp128 %x, %x
-; CHECK-NEXT: ret fp128 %mul
+; CHECK-NEXT: %fabsl = tail call fp128 @llvm.fabs.f128(fp128 %mul)
+; CHECK-NEXT: ret fp128 %fabsl
+}
+
+; TODO: This should be able to eliminate the fabs
+define float @square_nnan_fabs_intrinsic_f32(float %x) {
+ %mul = fmul nnan float %x, %x
+ %fabsf = call float @llvm.fabs.f32(float %mul)
+ ret float %fabsf
+
+; CHECK-LABEL: square_nnan_fabs_intrinsic_f32(
+; CHECK-NEXT: %mul = fmul nnan float %x, %x
+; CHECK-NEXT: %fabsf = call float @llvm.fabs.f32(float %mul)
+; CHECK-NEXT: ret float %fabsf
}
; Shrinking a library call to a smaller type should not be inhibited by nor inhibit the square optimization.
@@ -82,7 +102,10 @@ define float @square_fabs_shrink_call1(float %x) {
ret float %trunc
; CHECK-LABEL: square_fabs_shrink_call1(
-; CHECK-NEXT: %trunc = fmul float %x, %x
+; CHECK-NEXT: %ext = fpext float %x to double
+; CHECK-NEXT: %sq = fmul double %ext, %ext
+; CHECK-NEXT: call double @fabs(double %sq)
+; CHECK-NEXT: %trunc = fptrunc double %fabs to float
; CHECK-NEXT: ret float %trunc
}
@@ -95,7 +118,8 @@ define float @square_fabs_shrink_call2(float %x) {
; CHECK-LABEL: square_fabs_shrink_call2(
; CHECK-NEXT: %sq = fmul float %x, %x
-; CHECK-NEXT: ret float %sq
+; CHECK-NEXT: %fabsf = call float @fabsf(float %sq)
+; CHECK-NEXT: ret float %fabsf
}
; CHECK-LABEL: @fabs_select_constant_negative_positive(
diff --git a/test/Transforms/InstCombine/fast-math.ll b/test/Transforms/InstCombine/fast-math.ll
index 6ccf6e9fa7742..84f24ca0bf247 100644
--- a/test/Transforms/InstCombine/fast-math.ll
+++ b/test/Transforms/InstCombine/fast-math.ll
@@ -672,7 +672,8 @@ define double @sqrt_intrinsic_arg_4th(double %x) {
; CHECK-LABEL: sqrt_intrinsic_arg_4th(
; CHECK-NEXT: %mul = fmul fast double %x, %x
-; CHECK-NEXT: ret double %mul
+; CHECK-NEXT: %fabs = call fast double @llvm.fabs.f64(double %mul)
+; CHECK-NEXT: ret double %fabs
}
define double @sqrt_intrinsic_arg_5th(double %x) {
@@ -684,8 +685,9 @@ define double @sqrt_intrinsic_arg_5th(double %x) {
; CHECK-LABEL: sqrt_intrinsic_arg_5th(
; CHECK-NEXT: %mul = fmul fast double %x, %x
+; CHECK-NEXT: %fabs = call fast double @llvm.fabs.f64(double %mul)
; CHECK-NEXT: %sqrt1 = call fast double @llvm.sqrt.f64(double %x)
-; CHECK-NEXT: %1 = fmul fast double %mul, %sqrt1
+; CHECK-NEXT: %1 = fmul fast double %fabs, %sqrt1
; CHECK-NEXT: ret double %1
}
diff --git a/test/Transforms/InstCombine/urem-simplify-bug.ll b/test/Transforms/InstCombine/urem-simplify-bug.ll
index 1220dfdc77f05..4f18f35985407 100644
--- a/test/Transforms/InstCombine/urem-simplify-bug.ll
+++ b/test/Transforms/InstCombine/urem-simplify-bug.ll
@@ -1,32 +1,36 @@
-; RUN: opt < %s -instcombine -S | grep "= or i32 %x, -5"
+; RUN: opt < %s -instcombine -S | FileCheck %s
-@.str = internal constant [5 x i8] c"foo\0A\00" ; <[5 x i8]*> [#uses=1]
-@.str1 = internal constant [5 x i8] c"bar\0A\00" ; <[5 x i8]*> [#uses=1]
+@.str = internal constant [5 x i8] c"foo\0A\00"
+@.str1 = internal constant [5 x i8] c"bar\0A\00"
define i32 @main() nounwind {
entry:
- %x = call i32 @func_11( ) nounwind ; <i32> [#uses=1]
- %tmp3 = or i32 %x, -5 ; <i32> [#uses=1]
- %tmp5 = urem i32 251, %tmp3 ; <i32> [#uses=1]
- %tmp6 = icmp ne i32 %tmp5, 0 ; <i1> [#uses=1]
- %tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
- %tmp9 = urem i32 %tmp67, 95 ; <i32> [#uses=1]
- %tmp10 = and i32 %tmp9, 1 ; <i32> [#uses=1]
- %tmp12 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
- br i1 %tmp12, label %bb14, label %bb
-
-bb: ; preds = %entry
- br label %bb15
-
-bb14: ; preds = %entry
- br label %bb15
-
-bb15: ; preds = %bb14, %bb
- %iftmp.0.0 = phi i8* [ getelementptr ([5 x i8], [5 x i8]* @.str1, i32 0, i32 0), %bb14 ], [ getelementptr ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), %bb ] ; <i8*> [#uses=1]
- %tmp17 = call i32 (i8*, ...) @printf( i8* %iftmp.0.0 ) nounwind ; <i32> [#uses=0]
- ret i32 0
+ %x = call i32 @func_11() nounwind
+ %tmp3 = or i32 %x, -5
+ %tmp5 = urem i32 251, %tmp3
+ %tmp6 = icmp ne i32 %tmp5, 0
+ %tmp67 = zext i1 %tmp6 to i32
+ %tmp9 = urem i32 %tmp67, 95
+ %tmp10 = and i32 %tmp9, 1
+ %tmp12 = icmp eq i32 %tmp10, 0
+ br i1 %tmp12, label %bb14, label %bb
+
+bb:
+ br label %bb15
+
+bb14:
+ br label %bb15
+
+bb15:
+ %iftmp.0.0 = phi i8* [ getelementptr ([5 x i8], [5 x i8]* @.str1, i32 0, i32 0), %bb14 ], [ getelementptr ([5 x i8], [5 x i8]* @.str, i32 0, i32 0), %bb ]
+ %tmp17 = call i32 (i8*, ...) @printf(i8* %iftmp.0.0) nounwind
+ ret i32 0
}
+; CHECK-LABEL: define i32 @main(
+; CHECK: call i32 @func_11()
+; CHECK-NEXT: br i1 false, label %bb14, label %bb
+
declare i32 @func_11()
-declare i32 @printf(i8*, ...) nounwind
+declare i32 @printf(i8*, ...) nounwind
diff --git a/test/Transforms/InstSimplify/div.ll b/test/Transforms/InstSimplify/div.ll
new file mode 100644
index 0000000000000..b8ce34aaa37e6
--- /dev/null
+++ b/test/Transforms/InstSimplify/div.ll
@@ -0,0 +1,15 @@
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+declare i32 @external()
+
+define i32 @div1() {
+; CHECK-LABEL: @div1(
+; CHECK: [[CALL:%.*]] = call i32 @external(), !range !0
+; CHECK-NEXT: ret i32 0
+;
+ %call = call i32 @external(), !range !0
+ %urem = udiv i32 %call, 3
+ ret i32 %urem
+}
+
+!0 = !{i32 0, i32 3}
diff --git a/test/Transforms/InstSimplify/rem.ll b/test/Transforms/InstSimplify/rem.ll
index df3f659b782e4..c73d34346ded5 100644
--- a/test/Transforms/InstSimplify/rem.ll
+++ b/test/Transforms/InstSimplify/rem.ll
@@ -49,3 +49,17 @@ define i32 @rem3(i32 %x, i32 %n) {
%mod1 = urem i32 %mod, %n
ret i32 %mod1
}
+
+declare i32 @external()
+
+define i32 @rem4() {
+; CHECK-LABEL: @rem4(
+; CHECK: [[CALL:%.*]] = call i32 @external(), !range !0
+; CHECK-NEXT: ret i32 [[CALL]]
+;
+ %call = call i32 @external(), !range !0
+ %urem = urem i32 %call, 3
+ ret i32 %urem
+}
+
+!0 = !{i32 0, i32 3}
diff --git a/test/Transforms/LICM/hoisting.ll b/test/Transforms/LICM/hoisting.ll
index cb6981ede1e7f..c61131b476b9c 100644
--- a/test/Transforms/LICM/hoisting.ll
+++ b/test/Transforms/LICM/hoisting.ll
@@ -5,6 +5,8 @@
declare void @foo()
+declare i32 @llvm.bitreverse.i32(i32)
+
; This testcase tests for a problem where LICM hoists
; potentially trapping instructions when they are not guaranteed to execute.
define i32 @test1(i1 %c) {
@@ -122,3 +124,28 @@ then: ; preds = %tailrecurse
ifend: ; preds = %tailrecurse
ret { i32*, i32 } %d
}
+
+; CHECK: define i32 @hoist_bitreverse(i32)
+; CHECK: bitreverse
+; CHECK: br label %header
+define i32 @hoist_bitreverse(i32) {
+ br label %header
+
+header:
+ %sum = phi i32 [ 0, %1 ], [ %5, %latch ]
+ %2 = phi i32 [ 0, %1 ], [ %6, %latch ]
+ %3 = icmp slt i32 %2, 1024
+ br i1 %3, label %body, label %return
+
+body:
+ %4 = call i32 @llvm.bitreverse.i32(i32 %0)
+ %5 = add i32 %sum, %4
+ br label %latch
+
+latch:
+ %6 = add nsw i32 %2, 1
+ br label %header
+
+return:
+ ret i32 %sum
+}
diff --git a/test/Transforms/LoopLoadElim/forward.ll b/test/Transforms/LoopLoadElim/forward.ll
index ed0d162ab7e3c..9a0e03a317c8c 100644
--- a/test/Transforms/LoopLoadElim/forward.ll
+++ b/test/Transforms/LoopLoadElim/forward.ll
@@ -16,8 +16,8 @@ define void @f(i32* %A, i32* %B, i32* %C, i64 %N) {
; CHECK-NOT: %found.conflict{{.*}} =
entry:
-; for.body.ph:
-; CHECK: %load_initial = load i32, i32* %A
+; Make sure the hoisted load keeps the alignment
+; CHECK: %load_initial = load i32, i32* %A, align 1
br label %for.body
for.body: ; preds = %for.body, %entry
@@ -34,7 +34,7 @@ for.body: ; preds = %for.body, %entry
%a_p1 = add i32 %b, 2
store i32 %a_p1, i32* %Aidx_next, align 4
- %a = load i32, i32* %Aidx, align 4
+ %a = load i32, i32* %Aidx, align 1
; CHECK: %c = mul i32 %store_forwarded, 2
%c = mul i32 %a, 2
store i32 %c, i32* %Cidx, align 4
diff --git a/test/Transforms/LoopVectorize/iv_outside_user.ll b/test/Transforms/LoopVectorize/iv_outside_user.ll
index d536d1023f413..8a44af96e7f4b 100644
--- a/test/Transforms/LoopVectorize/iv_outside_user.ll
+++ b/test/Transforms/LoopVectorize/iv_outside_user.ll
@@ -133,3 +133,48 @@ for.end:
store i32 %phi2, i32* %p
ret i32 %phi
}
+
+; CHECK-LABEL: @PR30742
+; CHECK: min.iters.checked
+; CHECK: %[[N_MOD_VF:.+]] = urem i32 %[[T5:.+]], 2
+; CHECK: %[[N_VEC:.+]] = sub i32 %[[T5]], %[[N_MOD_VF]]
+; CHECK: middle.block
+; CHECK: %[[CMP:.+]] = icmp eq i32 %[[T5]], %[[N_VEC]]
+; CHECK: %[[T15:.+]] = add i32 %tmp03, -7
+; CHECK: %[[T16:.+]] = shl i32 %[[N_MOD_VF]], 3
+; CHECK: %[[T17:.+]] = add i32 %[[T15]], %[[T16]]
+; CHECK: %[[T18:.+]] = shl i32 {{.*}}, 3
+; CHECK: %ind.escape = sub i32 %[[T17]], %[[T18]]
+; CHECK: br i1 %[[CMP]], label %BB3, label %scalar.ph
+define void @PR30742() {
+BB0:
+ br label %BB1
+
+BB1:
+ %tmp00 = load i32, i32* undef, align 16
+ %tmp01 = sub i32 %tmp00, undef
+ %tmp02 = icmp slt i32 %tmp01, 1
+ %tmp03 = select i1 %tmp02, i32 1, i32 %tmp01
+ %tmp04 = add nsw i32 %tmp03, -7
+ br label %BB2
+
+BB2:
+ %tmp05 = phi i32 [ %tmp04, %BB1 ], [ %tmp06, %BB2 ]
+ %tmp06 = add i32 %tmp05, -8
+ %tmp07 = icmp sgt i32 %tmp06, 0
+ br i1 %tmp07, label %BB2, label %BB3
+
+BB3:
+ %tmp08 = phi i32 [ %tmp05, %BB2 ]
+ %tmp09 = sub i32 %tmp00, undef
+ %tmp10 = icmp slt i32 %tmp09, 1
+ %tmp11 = select i1 %tmp10, i32 1, i32 %tmp09
+ %tmp12 = add nsw i32 %tmp11, -7
+ br label %BB4
+
+BB4:
+ %tmp13 = phi i32 [ %tmp12, %BB3 ], [ %tmp14, %BB4 ]
+ %tmp14 = add i32 %tmp13, -8
+ %tmp15 = icmp sgt i32 %tmp14, 0
+ br i1 %tmp15, label %BB4, label %BB1
+}
diff --git a/test/Transforms/NewGVN/basic-cyclic-opt.ll b/test/Transforms/NewGVN/basic-cyclic-opt.ll
new file mode 100644
index 0000000000000..523ed2612e3ce
--- /dev/null
+++ b/test/Transforms/NewGVN/basic-cyclic-opt.ll
@@ -0,0 +1,235 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+;; Function Attrs: nounwind ssp uwtable
+;; We should eliminate the sub and one of the phi nodes
+define void @vnum_test1(i32* %data) #0 {
+; CHECK-LABEL: @vnum_test1(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i64 3
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: br label [[BB4:%.*]]
+; CHECK: bb4:
+; CHECK-NEXT: [[M_0:%.*]] = phi i32 [ [[TMP3]], [[BB:%.*]] ], [ [[TMP15:%.*]], [[BB17:%.*]] ]
+; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[BB]] ], [ [[TMP18:%.*]], [[BB17]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp slt i32 [[I_0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[BB6:%.*]], label [[BB19:%.*]]
+; CHECK: bb6:
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 2
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 [[TMP9]]
+; CHECK-NEXT: store i32 2, i32* [[TMP10]], align 4
+; CHECK-NEXT: store i32 0, i32* [[DATA]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 1
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15]] = add nsw i32 [[M_0]], [[TMP14]]
+; CHECK-NEXT: br label [[BB17]]
+; CHECK: bb17:
+; CHECK-NEXT: [[TMP18]] = add nsw i32 [[I_0]], 1
+; CHECK-NEXT: br label [[BB4]]
+; CHECK: bb19:
+; CHECK-NEXT: ret void
+;
+bb:
+ %tmp = getelementptr inbounds i32, i32* %data, i64 3
+ %tmp1 = load i32, i32* %tmp, align 4
+ %tmp2 = getelementptr inbounds i32, i32* %data, i64 4
+ %tmp3 = load i32, i32* %tmp2, align 4
+ br label %bb4
+
+bb4: ; preds = %bb17, %bb
+ %m.0 = phi i32 [ %tmp3, %bb ], [ %tmp15, %bb17 ]
+ %i.0 = phi i32 [ 0, %bb ], [ %tmp18, %bb17 ]
+ %n.0 = phi i32 [ %tmp3, %bb ], [ %tmp16, %bb17 ]
+ %tmp5 = icmp slt i32 %i.0, %tmp1
+ br i1 %tmp5, label %bb6, label %bb19
+
+bb6: ; preds = %bb4
+ %tmp7 = getelementptr inbounds i32, i32* %data, i64 2
+ %tmp8 = load i32, i32* %tmp7, align 4
+ %tmp9 = sext i32 %tmp8 to i64
+ %tmp10 = getelementptr inbounds i32, i32* %data, i64 %tmp9
+ store i32 2, i32* %tmp10, align 4
+ %tmp11 = sub nsw i32 %m.0, %n.0
+ %tmp12 = getelementptr inbounds i32, i32* %data, i64 0
+ store i32 %tmp11, i32* %tmp12, align 4
+ %tmp13 = getelementptr inbounds i32, i32* %data, i64 1
+ %tmp14 = load i32, i32* %tmp13, align 4
+ %tmp15 = add nsw i32 %m.0, %tmp14
+ %tmp16 = add nsw i32 %n.0, %tmp14
+ br label %bb17
+
+bb17: ; preds = %bb6
+ %tmp18 = add nsw i32 %i.0, 1
+ br label %bb4
+
+bb19: ; preds = %bb4
+ ret void
+}
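+
+Why the sub can be eliminated (a reduced sketch of the cyclic congruence, with assumed names; not part of the test): %m.0 and %n.0 start from the same value and are advanced by the same amount each iteration, so NewGVN's optimistic value numbering places them in one congruence class and %m.0 - %n.0 folds to 0, which is what the store of 0 in the CHECK lines above reflects.

define i32 @congruent_phis_sketch(i32 %init, i32 %step, i32 %n) {
entry:
  br label %loop

loop:
  ; %m and %n.phi have the same incoming values and the same update,
  ; so they land in the same congruence class.
  %m = phi i32 [ %init, %entry ], [ %m.next, %loop ]
  %n.phi = phi i32 [ %init, %entry ], [ %n.next, %loop ]
  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  %diff = sub nsw i32 %m, %n.phi          ; value-numbers to 0
  %m.next = add nsw i32 %m, %step
  %n.next = add nsw i32 %n.phi, %step
  %i.next = add nsw i32 %i, 1
  %cond = icmp slt i32 %i.next, %n
  br i1 %cond, label %loop, label %exit

exit:
  ret i32 %diff
}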
+
+;; Function Attrs: nounwind ssp uwtable
+;; We should eliminate the sub and one of the phi nodes, prove that the store of the sub
+;; and the load of data are equivalent, that the load always produces constant 0, and
+;; delete the load, replacing it with constant 0.
+define i32 @vnum_test2(i32* %data) #0 {
+; CHECK-LABEL: @vnum_test2(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i64 3
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: br label [[BB4:%.*]]
+; CHECK: bb4:
+; CHECK-NEXT: [[M_0:%.*]] = phi i32 [ [[TMP3]], [[BB:%.*]] ], [ [[TMP15:%.*]], [[BB19:%.*]] ]
+; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[BB]] ], [ [[TMP20:%.*]], [[BB19]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp slt i32 [[I_0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[BB6:%.*]], label [[BB21:%.*]]
+; CHECK: bb6:
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 2
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 [[TMP9]]
+; CHECK-NEXT: store i32 2, i32* [[TMP10]], align 4
+; CHECK-NEXT: store i32 0, i32* [[DATA]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 1
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
+; CHECK-NEXT: [[TMP15]] = add nsw i32 [[M_0]], [[TMP14]]
+; CHECK-NEXT: br label [[BB19]]
+; CHECK: bb19:
+; CHECK-NEXT: [[TMP20]] = add nsw i32 [[I_0]], 1
+; CHECK-NEXT: br label [[BB4]]
+; CHECK: bb21:
+; CHECK-NEXT: ret i32 0
+;
+bb:
+ %tmp = getelementptr inbounds i32, i32* %data, i64 3
+ %tmp1 = load i32, i32* %tmp, align 4
+ %tmp2 = getelementptr inbounds i32, i32* %data, i64 4
+ %tmp3 = load i32, i32* %tmp2, align 4
+ br label %bb4
+
+bb4: ; preds = %bb19, %bb
+ %m.0 = phi i32 [ %tmp3, %bb ], [ %tmp15, %bb19 ]
+ %n.0 = phi i32 [ %tmp3, %bb ], [ %tmp16, %bb19 ]
+ %i.0 = phi i32 [ 0, %bb ], [ %tmp20, %bb19 ]
+ %p.0 = phi i32 [ undef, %bb ], [ %tmp18, %bb19 ]
+ %tmp5 = icmp slt i32 %i.0, %tmp1
+ br i1 %tmp5, label %bb6, label %bb21
+
+bb6: ; preds = %bb4
+ %tmp7 = getelementptr inbounds i32, i32* %data, i64 2
+ %tmp8 = load i32, i32* %tmp7, align 4
+ %tmp9 = sext i32 %tmp8 to i64
+ %tmp10 = getelementptr inbounds i32, i32* %data, i64 %tmp9
+ store i32 2, i32* %tmp10, align 4
+ %tmp11 = sub nsw i32 %m.0, %n.0
+ %tmp12 = getelementptr inbounds i32, i32* %data, i64 0
+ store i32 %tmp11, i32* %tmp12, align 4
+ %tmp13 = getelementptr inbounds i32, i32* %data, i64 1
+ %tmp14 = load i32, i32* %tmp13, align 4
+ %tmp15 = add nsw i32 %m.0, %tmp14
+ %tmp16 = add nsw i32 %n.0, %tmp14
+ %tmp17 = getelementptr inbounds i32, i32* %data, i64 0
+ %tmp18 = load i32, i32* %tmp17, align 4
+ br label %bb19
+
+bb19: ; preds = %bb6
+ %tmp20 = add nsw i32 %i.0, 1
+ br label %bb4
+
+bb21: ; preds = %bb4
+ ret i32 %p.0
+}
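
The extra step in test 2 is forwarding the stored value through the congruence: the stored value is the sub (proven 0 as above), so the reload of data[0] is 0, the %p.0 phi is 0 on every path, and the return folds to ret i32 0 as checked. A minimal, assumed reduction of that chain (illustration only):

define i32 @store_forward_zero_sketch(i32* %data, i32 %m) {
entry:
  ; The sub is congruent to 0 (both operands are in the same class).
  %zero = sub nsw i32 %m, %m
  store i32 %zero, i32* %data, align 4
  ; The reload sees the just-stored value, so it value-numbers to 0 ...
  %reload = load i32, i32* %data, align 4
  ; ... and the return simplifies to "ret i32 0".
  ret i32 %reload
}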
+
+
+;; Function Attrs: nounwind ssp uwtable
+;; Same as test 2, but with a conditional store of m-n, so it also has to discover
+;; that data ends up with the same value no matter which branch is taken.
+define i32 @vnum_test3(i32* %data) #0 {
+; CHECK-LABEL: @vnum_test3(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i64 3
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
+; CHECK-NEXT: br label [[BB4:%.*]]
+; CHECK: bb4:
+; CHECK-NEXT: [[N_0:%.*]] = phi i32 [ [[TMP3]], [[BB:%.*]] ], [ [[TMP19:%.*]], [[BB21:%.*]] ]
+; CHECK-NEXT: [[I_0:%.*]] = phi i32 [ 0, [[BB]] ], [ [[TMP22:%.*]], [[BB21]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp slt i32 [[I_0]], [[TMP1]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[BB6:%.*]], label [[BB23:%.*]]
+; CHECK: bb6:
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 2
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 5
+; CHECK-NEXT: store i32 0, i32* [[TMP9]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = icmp slt i32 [[I_0]], 30
+; CHECK-NEXT: br i1 [[TMP10]], label [[BB11:%.*]], label [[BB14:%.*]]
+; CHECK: bb11:
+; CHECK-NEXT: store i32 0, i32* [[TMP9]], align 4
+; CHECK-NEXT: br label [[BB14]]
+; CHECK: bb14:
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[DATA]], i64 1
+; CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
+; CHECK-NEXT: [[TMP19]] = add nsw i32 [[N_0]], [[TMP18]]
+; CHECK-NEXT: br label [[BB21]]
+; CHECK: bb21:
+; CHECK-NEXT: [[TMP22]] = add nsw i32 [[I_0]], 1
+; CHECK-NEXT: br label [[BB4]]
+; CHECK: bb23:
+; CHECK-NEXT: ret i32 0
+;
+bb:
+ %tmp = getelementptr inbounds i32, i32* %data, i64 3
+ %tmp1 = load i32, i32* %tmp, align 4
+ %tmp2 = getelementptr inbounds i32, i32* %data, i64 4
+ %tmp3 = load i32, i32* %tmp2, align 4
+ br label %bb4
+
+bb4: ; preds = %bb21, %bb
+ %n.0 = phi i32 [ %tmp3, %bb ], [ %tmp20, %bb21 ]
+ %m.0 = phi i32 [ %tmp3, %bb ], [ %tmp19, %bb21 ]
+ %p.0 = phi i32 [ 0, %bb ], [ %tmp16, %bb21 ]
+ %i.0 = phi i32 [ 0, %bb ], [ %tmp22, %bb21 ]
+ %tmp5 = icmp slt i32 %i.0, %tmp1
+ br i1 %tmp5, label %bb6, label %bb23
+
+bb6: ; preds = %bb4
+ %tmp7 = getelementptr inbounds i32, i32* %data, i64 2
+ %tmp8 = load i32, i32* %tmp7, align 4
+ %tmp9 = getelementptr inbounds i32, i32* %data, i64 5
+ store i32 0, i32* %tmp9, align 4
+ %tmp10 = icmp slt i32 %i.0, 30
+ br i1 %tmp10, label %bb11, label %bb14
+
+bb11: ; preds = %bb6
+ %tmp12 = sub nsw i32 %m.0, %n.0
+ %tmp13 = getelementptr inbounds i32, i32* %data, i64 5
+ store i32 %tmp12, i32* %tmp13, align 4
+ br label %bb14
+
+bb14: ; preds = %bb11, %bb6
+ %tmp15 = getelementptr inbounds i32, i32* %data, i64 5
+ %tmp16 = load i32, i32* %tmp15, align 4
+ %tmp17 = getelementptr inbounds i32, i32* %data, i64 1
+ %tmp18 = load i32, i32* %tmp17, align 4
+ %tmp19 = add nsw i32 %m.0, %tmp18
+ %tmp20 = add nsw i32 %n.0, %tmp18
+ br label %bb21
+
+bb21: ; preds = %bb14
+ %tmp22 = add nsw i32 %i.0, 1
+ br label %bb4
+
+bb23: ; preds = %bb4
+ ret i32 %p.0
+}
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.ident = !{!0, !0, !0}
+
+!0 = !{!"Apple LLVM version 6.0 (clang-600.0.56) (based on LLVM 3.5svn)"}
diff --git a/test/Transforms/NewGVN/cyclic-phi-handling.ll b/test/Transforms/NewGVN/cyclic-phi-handling.ll
new file mode 100644
index 0000000000000..283c78548995d
--- /dev/null
+++ b/test/Transforms/NewGVN/cyclic-phi-handling.ll
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @foo(i32 %arg, i32 %arg1, i32 (i32, i32)* %arg2) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br label %bb3
+; CHECK: bb3:
+; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ %arg1, %bb ], [ [[TMP:%.*]]4, %bb7 ]
+; CHECK-NEXT: [[TMP4:%.*]] = phi i32 [ %arg, %bb ], [ [[TMP]], %bb7 ]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 %arg2(i32 [[TMP4]], i32 [[TMP]])
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label %bb7, label %bb8
+; CHECK: bb7:
+; CHECK-NEXT: br label %bb3
+; CHECK: bb8:
+; CHECK-NEXT: ret void
+;
+bb:
+ br label %bb3
+
+;; While non-standard, LLVM allows mutually dependent phi nodes.
+;; Ensure we do not loop forever trying to process them.
+bb3: ; preds = %bb7, %bb
+ %tmp = phi i32 [ %arg1, %bb ], [ %tmp4, %bb7 ]
+ %tmp4 = phi i32 [ %arg, %bb ], [ %tmp, %bb7 ]
+ %tmp5 = call i32 %arg2(i32 %tmp4, i32 %tmp)
+ %tmp6 = icmp ne i32 %tmp5, 0
+ br i1 %tmp6, label %bb7, label %bb8
+
+bb7: ; preds = %bb3
+ br label %bb3
+
+bb8: ; preds = %bb3
+ ret void
+}
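
The mutually dependent pair, shown in isolation with assumed names (an illustration of the comment above, not a new test): each phi's back-edge operand is the other phi, so naively recomputing a phi whenever one of its operands changes ping-pongs between the two forever unless the cycle is detected or iteration is bounded.

define void @mutual_phis_sketch(i32 %a, i32 %b, i1 %c) {
bb:
  br label %bb3

bb3:
  ; Each phi's back-edge value is the other phi.
  %tmp  = phi i32 [ %b, %bb ], [ %tmp4, %bb7 ]
  %tmp4 = phi i32 [ %a, %bb ], [ %tmp,  %bb7 ]
  br i1 %c, label %bb7, label %bb8

bb7:
  br label %bb3

bb8:
  ret void
}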
diff --git a/test/Transforms/NewGVN/invariant.group.ll b/test/Transforms/NewGVN/invariant.group.ll
index 2bddc99c8b855..80c6e05a8e24e 100644
--- a/test/Transforms/NewGVN/invariant.group.ll
+++ b/test/Transforms/NewGVN/invariant.group.ll
@@ -345,11 +345,63 @@ _Z1gR1A.exit: ; preds = %0, %5
ret void
}
+; Check that no optimizations are performed with global pointers.
+; FIXME: we could do the optimizations if we checked whether the dependency comes
+; from the same function.
+; CHECK-LABEL: define void @testGlobal() {
+define void @testGlobal() {
+; CHECK: %a = load i8, i8* @unknownPtr, !invariant.group !0
+ %a = load i8, i8* @unknownPtr, !invariant.group !0
+ call void @foo2(i8* @unknownPtr, i8 %a)
+; CHECK: %1 = load i8, i8* @unknownPtr, !invariant.group !0
+ %1 = load i8, i8* @unknownPtr, !invariant.group !0
+ call void @bar(i8 %1)
+
+ %b0 = bitcast i8* @unknownPtr to i1*
+ call void @fooBit(i1* %b0, i1 1)
+; Adding regex because of canonicalization of bitcasts
+; CHECK: %2 = load i1, i1* {{.*}}, !invariant.group !0
+ %2 = load i1, i1* %b0, !invariant.group !0
+ call void @fooBit(i1* %b0, i1 %2)
+; CHECK: %3 = load i1, i1* {{.*}}, !invariant.group !0
+ %3 = load i1, i1* %b0, !invariant.group !0
+ call void @fooBit(i1* %b0, i1 %3)
+ ret void
+}
+; And the case where the pointer is not a global
+; CHECK-LABEL: define void @testNotGlobal() {
+define void @testNotGlobal() {
+ %a = alloca i8
+ call void @foo(i8* %a)
+; CHECK: %b = load i8, i8* %a, !invariant.group !0
+ %b = load i8, i8* %a, !invariant.group !0
+ call void @foo2(i8* %a, i8 %b)
+
+ %1 = load i8, i8* %a, !invariant.group !0
+; CHECK: call void @bar(i8 %b)
+ call void @bar(i8 %1)
+
+ %b0 = bitcast i8* %a to i1*
+ call void @fooBit(i1* %b0, i1 1)
+; CHECK: %trunc = trunc i8 %b to i1
+ %2 = load i1, i1* %b0, !invariant.group !0
+; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %trunc)
+ call void @fooBit(i1* %b0, i1 %2)
+ %3 = load i1, i1* %b0, !invariant.group !0
+; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %trunc)
+ call void @fooBit(i1* %b0, i1 %3)
+ ret void
+}
+
+
declare void @foo(i8*)
+declare void @foo2(i8*, i8)
declare void @bar(i8)
declare i8* @getPointer(i8*)
declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*)
+declare void @fooBit(i1*, i1)
+
declare i8* @llvm.invariant.group.barrier(i8*)
; Function Attrs: nounwind
diff --git a/test/Transforms/NewGVN/memory-handling.ll b/test/Transforms/NewGVN/memory-handling.ll
new file mode 100644
index 0000000000000..a0c4a998b8b6a
--- /dev/null
+++ b/test/Transforms/NewGVN/memory-handling.ll
@@ -0,0 +1,195 @@
+;; This test is really dependent on propagating a lot of memory info around, but in the end
+;; it comes down to not screwing up a single add.
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+%struct.Letter = type { i32, i32, i32, i32 }
+
+@alPhrase = external local_unnamed_addr global [26 x %struct.Letter], align 16
+@aqMainMask = external local_unnamed_addr global [2 x i64], align 16
+@aqMainSign = external local_unnamed_addr global [2 x i64], align 16
+@cchPhraseLength = external local_unnamed_addr global i32, align 4
+@auGlobalFrequency = external local_unnamed_addr global [26 x i32], align 16
+@.str.7 = external hidden unnamed_addr constant [28 x i8], align 1
+
+; Function Attrs: nounwind uwtable
+declare void @Fatal(i8*, i32) local_unnamed_addr #0
+
+; Function Attrs: nounwind readnone
+declare i16** @__ctype_b_loc() local_unnamed_addr #1
+
+; Function Attrs: nounwind uwtable
+define void @BuildMask(i8* nocapture readonly) local_unnamed_addr #0 {
+ tail call void @llvm.memset.p0i8.i64(i8* bitcast ([26 x %struct.Letter]* @alPhrase to i8*), i8 0, i64 416, i32 16, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* bitcast ([2 x i64]* @aqMainMask to i8*), i8 0, i64 16, i32 16, i1 false)
+ tail call void @llvm.memset.p0i8.i64(i8* bitcast ([2 x i64]* @aqMainSign to i8*), i8 0, i64 16, i32 16, i1 false)
+ br label %.sink.split
+
+.sink.split: ; preds = %14, %1
+ %.0 = phi i8* [ %0, %1 ], [ %.lcssa67, %14 ]
+ %.sink = phi i32 [ 0, %1 ], [ %23, %14 ]
+ store i32 %.sink, i32* @cchPhraseLength, align 4, !tbaa !1
+ br label %2
+
+; <label>:2: ; preds = %6, %.sink.split
+ %.1 = phi i8* [ %.0, %.sink.split ], [ %3, %6 ]
+ %3 = getelementptr inbounds i8, i8* %.1, i64 1
+ %4 = load i8, i8* %.1, align 1, !tbaa !5
+ %5 = icmp eq i8 %4, 0
+ br i1 %5, label %.preheader.preheader, label %6
+
+.preheader.preheader: ; preds = %2
+ br label %.preheader
+
+; <label>:6: ; preds = %2
+ %7 = tail call i16** @__ctype_b_loc() #4
+ %8 = load i16*, i16** %7, align 8, !tbaa !6
+ %9 = sext i8 %4 to i64
+ %10 = getelementptr inbounds i16, i16* %8, i64 %9
+ %11 = load i16, i16* %10, align 2, !tbaa !8
+ %12 = and i16 %11, 1024
+ %13 = icmp eq i16 %12, 0
+ br i1 %13, label %2, label %14
+
+; <label>:14: ; preds = %6
+ %.lcssa67 = phi i8* [ %3, %6 ]
+ %.lcssa65 = phi i8 [ %4, %6 ]
+ %15 = sext i8 %.lcssa65 to i32
+ %16 = tail call i32 @tolower(i32 %15) #5
+ %17 = add nsw i32 %16, -97
+ %18 = sext i32 %17 to i64
+ %19 = getelementptr inbounds [26 x %struct.Letter], [26 x %struct.Letter]* @alPhrase, i64 0, i64 %18, i32 0
+ %20 = load i32, i32* %19, align 16, !tbaa !10
+ %21 = add i32 %20, 1
+ store i32 %21, i32* %19, align 16, !tbaa !10
+ %22 = load i32, i32* @cchPhraseLength, align 4, !tbaa !1
+ %23 = add nsw i32 %22, 1
+ br label %.sink.split
+
+.preheader: ; preds = %58, %.preheader.preheader
+ %indvars.iv = phi i64 [ 0, %.preheader.preheader ], [ %indvars.iv.next, %58 ]
+ %.04961 = phi i32 [ %.2, %58 ], [ 0, %.preheader.preheader ]
+ %.05160 = phi i32 [ %.253, %58 ], [ 0, %.preheader.preheader ]
+ %24 = getelementptr inbounds [26 x %struct.Letter], [26 x %struct.Letter]* @alPhrase, i64 0, i64 %indvars.iv, i32 0
+ %25 = load i32, i32* %24, align 16, !tbaa !10
+ %26 = icmp eq i32 %25, 0
+ %27 = getelementptr inbounds [26 x i32], [26 x i32]* @auGlobalFrequency, i64 0, i64 %indvars.iv
+ br i1 %26, label %28, label %29
+
+; <label>:28: ; preds = %.preheader
+ store i32 -1, i32* %27, align 4, !tbaa !1
+ br label %58
+
+; <label>:29: ; preds = %.preheader
+ store i32 0, i32* %27, align 4, !tbaa !1
+ %30 = zext i32 %25 to i64
+ br i1 false, label %._crit_edge, label %.lr.ph.preheader
+
+.lr.ph.preheader: ; preds = %29
+ br label %.lr.ph
+
+.lr.ph: ; preds = %.lr.ph, %.lr.ph.preheader
+ %.04658 = phi i64 [ %32, %.lr.ph ], [ 1, %.lr.ph.preheader ]
+ %.04857 = phi i32 [ %31, %.lr.ph ], [ 1, %.lr.ph.preheader ]
+ %31 = add nuw nsw i32 %.04857, 1
+ %32 = shl i64 %.04658, 1
+ %33 = icmp ult i64 %30, %32
+ br i1 %33, label %._crit_edge.loopexit, label %.lr.ph
+
+._crit_edge.loopexit: ; preds = %.lr.ph
+ %.lcssa63 = phi i32 [ %31, %.lr.ph ]
+ %.lcssa = phi i64 [ %32, %.lr.ph ]
+ br label %._crit_edge
+
+._crit_edge: ; preds = %._crit_edge.loopexit, %29
+ %.048.lcssa = phi i32 [ 1, %29 ], [ %.lcssa63, %._crit_edge.loopexit ]
+ %.046.lcssa = phi i64 [ 1, %29 ], [ %.lcssa, %._crit_edge.loopexit ]
+ %34 = add nsw i32 %.048.lcssa, %.04961
+ %35 = icmp ugt i32 %34, 64
+ br i1 %35, label %36, label %40
+
+; <label>:36: ; preds = %._crit_edge
+; This testcase essentially comes down to this little add.
+; If we screw up the revisitation of the users of the store of %.sink above,
+; we will end up propagating and simplifying this to 1 in the final output,
+; because we keep an optimistic assumption we should not keep.
+; CHECK: add i32 %.05160, 1
+ %37 = add i32 %.05160, 1
+ %38 = icmp ugt i32 %37, 1
+ br i1 %38, label %39, label %40
+
+; <label>:39: ; preds = %36
+ tail call void @Fatal(i8* getelementptr inbounds ([28 x i8], [28 x i8]* @.str.7, i64 0, i64 0), i32 0)
+ br label %40
+
+; <label>:40: ; preds = %39, %36, %._crit_edge
+ %.152 = phi i32 [ %.05160, %._crit_edge ], [ %37, %39 ], [ %37, %36 ]
+ %.150 = phi i32 [ %.04961, %._crit_edge ], [ 0, %39 ], [ 0, %36 ]
+ %41 = add i64 %.046.lcssa, 4294967295
+ %42 = trunc i64 %41 to i32
+ %43 = getelementptr inbounds [26 x %struct.Letter], [26 x %struct.Letter]* @alPhrase, i64 0, i64 %indvars.iv, i32 2
+ store i32 %42, i32* %43, align 8, !tbaa !12
+ %44 = zext i32 %.150 to i64
+ %.046. = shl i64 %.046.lcssa, %44
+ %45 = zext i32 %.152 to i64
+ %46 = getelementptr inbounds [2 x i64], [2 x i64]* @aqMainSign, i64 0, i64 %45
+ %47 = load i64, i64* %46, align 8, !tbaa !13
+ %48 = or i64 %47, %.046.
+ store i64 %48, i64* %46, align 8, !tbaa !13
+ %49 = load i32, i32* %24, align 16, !tbaa !10
+ %50 = zext i32 %49 to i64
+ %51 = shl i64 %50, %44
+ %52 = getelementptr inbounds [2 x i64], [2 x i64]* @aqMainMask, i64 0, i64 %45
+ %53 = load i64, i64* %52, align 8, !tbaa !13
+ %54 = or i64 %51, %53
+ store i64 %54, i64* %52, align 8, !tbaa !13
+ %55 = getelementptr inbounds [26 x %struct.Letter], [26 x %struct.Letter]* @alPhrase, i64 0, i64 %indvars.iv, i32 1
+ store i32 %.150, i32* %55, align 4, !tbaa !15
+ %56 = getelementptr inbounds [26 x %struct.Letter], [26 x %struct.Letter]* @alPhrase, i64 0, i64 %indvars.iv, i32 3
+ store i32 %.152, i32* %56, align 4, !tbaa !16
+ %57 = add nsw i32 %.150, %.048.lcssa
+ br label %58
+
+; <label>:58: ; preds = %40, %28
+ %.253 = phi i32 [ %.05160, %28 ], [ %.152, %40 ]
+ %.2 = phi i32 [ %.04961, %28 ], [ %57, %40 ]
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp ne i64 %indvars.iv.next, 26
+ br i1 %exitcond, label %.preheader, label %59
+
+; <label>:59: ; preds = %58
+ ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1) #2
+
+; Function Attrs: inlinehint nounwind readonly uwtable
+declare i32 @tolower(i32) local_unnamed_addr #3
+
+attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { argmemonly nounwind }
+attributes #3 = { inlinehint nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #4 = { nounwind readnone }
+attributes #5 = { nounwind readonly }
+
+!llvm.ident = !{!0}
+
+!0 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git 9b9db7fa41a1905899dbcbcc6cbdd05d2511da8e) (/Users/dannyb/sources/llvm-clean a3908a41623f6ac14ba8c04613d6c64e0544bb5d)"}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!3, !3, i64 0}
+!6 = !{!7, !7, i64 0}
+!7 = !{!"any pointer", !3, i64 0}
+!8 = !{!9, !9, i64 0}
+!9 = !{!"short", !3, i64 0}
+!10 = !{!11, !2, i64 0}
+!11 = !{!"", !2, i64 0, !2, i64 4, !2, i64 8, !2, i64 12}
+!12 = !{!11, !2, i64 8}
+!13 = !{!14, !14, i64 0}
+!14 = !{!"long", !3, i64 0}
+!15 = !{!11, !2, i64 4}
+!16 = !{!11, !2, i64 12}
diff --git a/test/Transforms/NewGVN/pr31501.ll b/test/Transforms/NewGVN/pr31501.ll
new file mode 100644
index 0000000000000..7122ade56eeb6
--- /dev/null
+++ b/test/Transforms/NewGVN/pr31501.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+%struct.foo = type { %struct.wombat.28*, %struct.zot, %struct.wombat.28* }
+%struct.zot = type { i64 }
+%struct.barney = type <{ %struct.wombat.28*, %struct.wibble, %struct.snork, %struct.quux.4, %struct.snork.10, %struct.ham.16*, %struct.wobble.23*, i32, i8, i8, [2 x i8] }>
+%struct.wibble = type { %struct.pluto, %struct.bar }
+%struct.pluto = type { %struct.quux }
+%struct.quux = type { %struct.eggs }
+%struct.eggs = type { %struct.zot.0, %struct.widget }
+%struct.zot.0 = type { i8*, i8*, i8* }
+%struct.widget = type { %struct.barney.1 }
+%struct.barney.1 = type { [8 x i8] }
+%struct.bar = type { [3 x %struct.widget] }
+%struct.snork = type <{ %struct.wobble, %struct.bar.3, [7 x i8] }>
+%struct.wobble = type { %struct.wombat }
+%struct.wombat = type { %struct.zot.2 }
+%struct.zot.2 = type { %struct.zot.0, %struct.ham }
+%struct.ham = type { %struct.barney.1 }
+%struct.bar.3 = type { i8 }
+%struct.quux.4 = type <{ %struct.quux.5, %struct.snork.9, [7 x i8] }>
+%struct.quux.5 = type { %struct.widget.6 }
+%struct.widget.6 = type { %struct.spam }
+%struct.spam = type { %struct.zot.0, %struct.ham.7 }
+%struct.ham.7 = type { %struct.barney.8 }
+%struct.barney.8 = type { [24 x i8] }
+%struct.snork.9 = type { i8 }
+%struct.snork.10 = type <{ %struct.foo.11, %struct.spam.15, [7 x i8] }>
+%struct.foo.11 = type { %struct.snork.12 }
+%struct.snork.12 = type { %struct.wombat.13 }
+%struct.wombat.13 = type { %struct.zot.0, %struct.wibble.14 }
+%struct.wibble.14 = type { %struct.barney.8 }
+%struct.spam.15 = type { i8 }
+%struct.ham.16 = type { %struct.pluto.17, %struct.pluto.17 }
+%struct.pluto.17 = type { %struct.bar.18 }
+%struct.bar.18 = type { %struct.baz*, %struct.zot.20, %struct.barney.22 }
+%struct.baz = type { %struct.wibble.19* }
+%struct.wibble.19 = type <{ %struct.baz, %struct.wibble.19*, %struct.baz*, i8, [7 x i8] }>
+%struct.zot.20 = type { %struct.ham.21 }
+%struct.ham.21 = type { %struct.baz }
+%struct.barney.22 = type { %struct.blam }
+%struct.blam = type { i64 }
+%struct.wobble.23 = type { %struct.spam.24, %struct.barney* }
+%struct.spam.24 = type { %struct.bar.25, %struct.zot.26* }
+%struct.bar.25 = type <{ i32 (...)**, i8, i8 }>
+%struct.zot.26 = type { i32 (...)**, i32, %struct.widget.27* }
+%struct.widget.27 = type { %struct.zot.26, %struct.zot.26* }
+%struct.wombat.28 = type <{ i32 (...)**, i8, i8, [6 x i8] }>
+
+; Function Attrs: norecurse nounwind ssp uwtable
+define weak_odr hidden %struct.foo* @quux(%struct.barney* %arg, %struct.wombat.28* %arg1) local_unnamed_addr #0 align 2 {
+; CHECK-LABEL: @quux(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds %struct.barney, %struct.barney* %arg, i64 0, i32 3, i32 0, i32 0, i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast %struct.spam* [[TMP]] to %struct.foo**
+; CHECK-NEXT: [[TMP3:%.*]] = load %struct.foo*, %struct.foo** [[TMP2]], align 8, !tbaa !2
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds %struct.barney, %struct.barney* %arg, i64 0, i32 3, i32 0, i32 0, i32 0, i32 0, i32 1
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to %struct.foo**
+; CHECK-NEXT: [[TMP6:%.*]] = load %struct.foo*, %struct.foo** [[TMP5]], align 8, !tbaa !7
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq %struct.foo* [[TMP3]], [[TMP6]]
+; CHECK-NEXT: br i1 [[TMP7]], label %bb21, label %bb8
+; CHECK: bb8:
+; CHECK-NEXT: br label %bb11
+; CHECK: bb9:
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq %struct.foo* [[TMP18:%.*]], [[TMP6]]
+; CHECK-NEXT: br i1 [[TMP10]], label %bb19, label %bb11
+; CHECK: bb11:
+; CHECK-NEXT: [[TMP12:%.*]] = phi %struct.foo* [ [[TMP17:%.*]], %bb9 ], [ undef, %bb8 ]
+; CHECK-NEXT: [[TMP13:%.*]] = phi %struct.foo* [ [[TMP18]], %bb9 ], [ [[TMP3]], %bb8 ]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds %struct.foo, %struct.foo* [[TMP13]], i64 0, i32 0
+; CHECK-NEXT: [[TMP15:%.*]] = load %struct.wombat.28*, %struct.wombat.28** [[TMP14]], align 8, !tbaa !8
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq %struct.wombat.28* [[TMP15]], %arg1
+; CHECK-NEXT: [[TMP17]] = select i1 [[TMP16]], %struct.foo* [[TMP13]], %struct.foo* [[TMP12]]
+; CHECK-NEXT: [[TMP18]] = getelementptr inbounds %struct.foo, %struct.foo* [[TMP13]], i64 1
+; CHECK-NEXT: br i1 [[TMP16]], label %bb19, label %bb9
+; CHECK: bb19:
+; CHECK-NEXT: [[TMP20:%.*]] = phi %struct.foo* [ null, %bb9 ], [ [[TMP17]], %bb11 ]
+; CHECK-NEXT: br label %bb21
+; CHECK: bb21:
+; CHECK-NEXT: [[TMP22:%.*]] = phi %struct.foo* [ null, %bb ], [ [[TMP20]], %bb19 ]
+; CHECK-NEXT: ret %struct.foo* [[TMP22]]
+;
+bb:
+ %tmp = getelementptr inbounds %struct.barney, %struct.barney* %arg, i64 0, i32 3, i32 0, i32 0, i32 0
+ %tmp2 = bitcast %struct.spam* %tmp to %struct.foo**
+ %tmp3 = load %struct.foo*, %struct.foo** %tmp2, align 8, !tbaa !2
+ %tmp4 = getelementptr inbounds %struct.barney, %struct.barney* %arg, i64 0, i32 3, i32 0, i32 0, i32 0, i32 0, i32 1
+ %tmp5 = bitcast i8** %tmp4 to %struct.foo**
+ %tmp6 = load %struct.foo*, %struct.foo** %tmp5, align 8, !tbaa !7
+ %tmp7 = icmp eq %struct.foo* %tmp3, %tmp6
+ br i1 %tmp7, label %bb21, label %bb8
+
+bb8: ; preds = %bb
+ br label %bb11
+
+bb9: ; preds = %bb11
+ %tmp10 = icmp eq %struct.foo* %tmp18, %tmp6
+ br i1 %tmp10, label %bb19, label %bb11
+
+bb11: ; preds = %bb9, %bb8
+ %tmp12 = phi %struct.foo* [ %tmp17, %bb9 ], [ undef, %bb8 ]
+ %tmp13 = phi %struct.foo* [ %tmp18, %bb9 ], [ %tmp3, %bb8 ]
+ %tmp14 = getelementptr inbounds %struct.foo, %struct.foo* %tmp13, i64 0, i32 0
+ %tmp15 = load %struct.wombat.28*, %struct.wombat.28** %tmp14, align 8, !tbaa !8
+ %tmp16 = icmp eq %struct.wombat.28* %tmp15, %arg1
+ %tmp17 = select i1 %tmp16, %struct.foo* %tmp13, %struct.foo* %tmp12
+ %tmp18 = getelementptr inbounds %struct.foo, %struct.foo* %tmp13, i64 1
+ br i1 %tmp16, label %bb19, label %bb9
+
+bb19: ; preds = %bb11, %bb9
+ %tmp20 = phi %struct.foo* [ null, %bb9 ], [ %tmp17, %bb11 ]
+ br label %bb21
+
+bb21: ; preds = %bb19, %bb
+ %tmp22 = phi %struct.foo* [ null, %bb ], [ %tmp20, %bb19 ]
+ ret %struct.foo* %tmp22
+}
+
+attributes #0 = { norecurse nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"clang version 4.0.0 (http://llvm.org/git/clang.git b63fa9e2bb8aac0a80c3e3467991c6b1a4b01e6a) (llvm/trunk 290779)"}
+!2 = !{!3, !4, i64 0}
+!3 = !{!"_ZTSN4llvm15SmallVectorBaseE", !4, i64 0, !4, i64 8, !4, i64 16}
+!4 = !{!"any pointer", !5, i64 0}
+!5 = !{!"omnipotent char", !6, i64 0}
+!6 = !{!"Simple C++ TBAA"}
+!7 = !{!3, !4, i64 8}
+!8 = !{!9, !4, i64 0}
+!9 = !{!"_ZTSN4llvm9RecordValE", !4, i64 0, !10, i64 8, !4, i64 16}
+!10 = !{!"_ZTSN4llvm14PointerIntPairIPNS_5RecTyELj1EbNS_21PointerLikeTypeTraitsIS2_EENS_18PointerIntPairInfoIS2_Lj1ES4_EEEE", !11, i64 0}
+!11 = !{!"long", !5, i64 0}
diff --git a/test/Transforms/NewGVN/pr31573.ll b/test/Transforms/NewGVN/pr31573.ll
new file mode 100644
index 0000000000000..0450b4b1299be
--- /dev/null
+++ b/test/Transforms/NewGVN/pr31573.ll
@@ -0,0 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -basicaa -newgvn -S | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @patatino(i8* %blah) {
+; CHECK-LABEL: @patatino(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[WHILE_COND:%.*]]
+; CHECK: while.cond:
+; CHECK-NEXT: [[MEH:%.*]] = phi i8* [ [[BLAH:%.*]], [[ENTRY:%.*]] ], [ null, [[WHILE_BODY:%.*]] ]
+; CHECK-NEXT: switch i32 undef, label [[WHILE_BODY]] [
+; CHECK-NEXT: i32 666, label [[WHILE_END:%.*]]
+; CHECK-NEXT: ]
+; CHECK: while.body:
+; CHECK-NEXT: br label [[WHILE_COND]]
+; CHECK: while.end:
+; CHECK-NEXT: store i8 0, i8* [[MEH]], align 1
+; CHECK-NEXT: store i8 0, i8* [[BLAH]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %while.cond
+
+while.cond:
+ %meh = phi i8* [ %blah, %entry ], [ null, %while.body ]
+ switch i32 undef, label %while.body [
+ i32 666, label %while.end
+ ]
+
+while.body:
+ br label %while.cond
+
+while.end:
+;; These two stores will initially be considered equivalent, but then proven not to be.
+;; The second store would previously end up being considered equivalent to a previous
+;; store, but it was really just finding an optimistic version of itself
+;; in the congruence class.
+ store i8 0, i8* %meh, align 1
+ store i8 0, i8* %blah, align 1
+ ret void
+}
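
What "initially equivalent, then proven not" means here, as an assumed illustration rather than test output: on the optimistic first pass the %meh phi is treated as equal to its first-seen incoming value %blah, so the two stores look like the same store; once the null back-edge value is processed, the phi leaves %blah's class and both stores must be kept.

define void @patatino_sketch(i8* %blah, i1 %exit.now) {
entry:
  br label %while.cond

while.cond:
  ; Optimistically, %meh is assumed equal to %blah (its first incoming value),
  ; which makes the two stores in %while.end look equivalent.
  %meh = phi i8* [ %blah, %entry ], [ null, %while.body ]
  br i1 %exit.now, label %while.end, label %while.body

while.body:
  br label %while.cond

while.end:
  ; After the null incoming value is seen, %meh and %blah are distinct,
  ; so neither store may be removed in favour of the other.
  store i8 0, i8* %meh, align 1
  store i8 0, i8* %blah, align 1
  ret void
}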
diff --git a/test/lit.cfg b/test/lit.cfg
index e0881ef21626f..e9916b2a60e8c 100644
--- a/test/lit.cfg
+++ b/test/lit.cfg
@@ -231,7 +231,7 @@ config.substitutions.append( ('%ld64', ld64_cmd) )
config.substitutions.append( ('%ocamlc',
"%s ocamlc -cclib -L%s %s" %
(config.ocamlfind_executable, llvm_lib_dir, config.ocaml_flags)) )
-if config.have_ocamlopt in ('1', 'TRUE'):
+if config.have_ocamlopt:
config.substitutions.append( ('%ocamlopt',
"%s ocamlopt -cclib -L%s -cclib -Wl,-rpath,%s %s" %
(config.ocamlfind_executable, llvm_lib_dir, llvm_lib_dir, config.ocaml_flags)) )
@@ -377,6 +377,10 @@ else:
if loadable_module:
config.available_features.add('loadable_module')
+# Static libraries are not built if BUILD_SHARED_LIBS is ON.
+if not config.build_shared_libs:
+ config.available_features.add("static-libs")
+
# Sanitizers.
if 'Address' in config.llvm_use_sanitizer:
config.available_features.add("asan")
@@ -399,7 +403,7 @@ if lit_config.params.get("run_long_tests", None) == "true":
if not 'hexagon' in config.target_triple:
config.available_features.add("object-emission")
-if config.have_zlib == "1":
+if config.have_zlib:
config.available_features.add("zlib")
else:
config.available_features.add("nozlib")
@@ -455,7 +459,7 @@ if have_ld_plugin_support():
config.available_features.add('ld_plugin')
def have_ld64_plugin_support():
- if (config.llvm_tool_lto_build == 'OFF' or config.ld64_executable == ''):
+ if not config.llvm_tool_lto_build or config.ld64_executable == '':
return False
ld_cmd = subprocess.Popen([config.ld64_executable, '-v'], stderr = subprocess.PIPE)
diff --git a/test/lit.site.cfg.in b/test/lit.site.cfg.in
index 95f00038bc7e1..b6a8b8b17bca2 100644
--- a/test/lit.site.cfg.in
+++ b/test/lit.site.cfg.in
@@ -15,12 +15,12 @@ config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@"
config.python_executable = "@PYTHON_EXECUTABLE@"
config.gold_executable = "@GOLD_EXECUTABLE@"
config.ld64_executable = "@LD64_EXECUTABLE@"
-config.llvm_tool_lto_build = "@LLVM_TOOL_LTO_BUILD@"
+config.llvm_tool_lto_build = @LLVM_TOOL_LTO_BUILD@
config.ocamlfind_executable = "@OCAMLFIND@"
-config.have_ocamlopt = "@HAVE_OCAMLOPT@"
-config.have_ocaml_ounit = "@HAVE_OCAML_OUNIT@"
+config.have_ocamlopt = @HAVE_OCAMLOPT@
+config.have_ocaml_ounit = @HAVE_OCAML_OUNIT@
config.ocaml_flags = "@OCAMLFLAGS@"
-config.include_go_tests = "@LLVM_INCLUDE_GO_TESTS@"
+config.include_go_tests = @LLVM_INCLUDE_GO_TESTS@
config.go_executable = "@GO_EXECUTABLE@"
config.enable_shared = @ENABLE_SHARED@
config.enable_assertions = @ENABLE_ASSERTIONS@
@@ -32,13 +32,13 @@ config.host_arch = "@HOST_ARCH@"
config.host_cc = "@HOST_CC@"
config.host_cxx = "@HOST_CXX@"
config.host_ldflags = "@HOST_LDFLAGS@"
-config.llvm_use_intel_jitevents = "@LLVM_USE_INTEL_JITEVENTS@"
+config.llvm_use_intel_jitevents = @LLVM_USE_INTEL_JITEVENTS@
config.llvm_use_sanitizer = "@LLVM_USE_SANITIZER@"
-config.have_zlib = "@HAVE_LIBZ@"
-config.have_libxar = "@HAVE_LIBXAR@"
+config.have_zlib = @HAVE_LIBZ@
+config.have_libxar = @HAVE_LIBXAR@
config.have_dia_sdk = @LLVM_ENABLE_DIA_SDK@
-config.enable_ffi = "@LLVM_ENABLE_FFI@"
-config.test_examples = "@ENABLE_EXAMPLES@"
+config.enable_ffi = @LLVM_ENABLE_FFI@
+config.build_shared_libs = @BUILD_SHARED_LIBS@
# Support substitution of the tools_dir with user parameters. This is
# used when we can't determine the tool dir at configuration time.
diff --git a/test/tools/llvm-config/system-libs.test b/test/tools/llvm-config/system-libs.test
index 6a1f3e39fa1ee..9105d242e049a 100644
--- a/test/tools/llvm-config/system-libs.test
+++ b/test/tools/llvm-config/system-libs.test
@@ -1,4 +1,5 @@
-RUN: llvm-config --system-libs 2>&1 | FileCheck %s
+RUN: llvm-config --link-static --system-libs 2>&1 | FileCheck %s
+REQUIRES: static-libs
UNSUPPORTED: system-windows
CHECK: -l
CHECK-NOT: error
diff --git a/test/tools/llvm-config/system-libs.windows.test b/test/tools/llvm-config/system-libs.windows.test
index cc976ea340697..2c6e03afa2d95 100644
--- a/test/tools/llvm-config/system-libs.windows.test
+++ b/test/tools/llvm-config/system-libs.windows.test
@@ -1,4 +1,5 @@
-RUN: llvm-config --system-libs 2>&1 | FileCheck %s
+RUN: llvm-config --link-static --system-libs 2>&1 | FileCheck %s
+REQUIRES: static-libs
REQUIRES: system-windows
CHECK-NOT: -l
CHECK: psapi.lib shell32.lib ole32.lib uuid.lib
diff --git a/test/tools/llvm-opt-report/Inputs/dm.c b/test/tools/llvm-opt-report/Inputs/dm.c
new file mode 100644
index 0000000000000..b031cd2b02918
--- /dev/null
+++ b/test/tools/llvm-opt-report/Inputs/dm.c
@@ -0,0 +1,13 @@
+void bar(void);
+void foo(int n) {
+ if (n) { bar(); } else { while (1) {} }
+}
+
+void quack(void) {
+ foo(0);
+}
+
+void quack2(void) {
+ foo(4);
+}
+
diff --git a/test/tools/llvm-opt-report/Inputs/dm.yaml b/test/tools/llvm-opt-report/Inputs/dm.yaml
new file mode 100644
index 0000000000000..5e6cc54a0a307
--- /dev/null
+++ b/test/tools/llvm-opt-report/Inputs/dm.yaml
@@ -0,0 +1,104 @@
+--- !Missed
+Pass: inline
+Name: NoDefinition
+DebugLoc: { File: Inputs/dm.c, Line: 3, Column: 12 }
+Function: foo
+Args:
+ - Callee: bar
+ - String: ' will not be inlined into '
+ - Caller: foo
+ DebugLoc: { File: Inputs/dm.c, Line: 2, Column: 0 }
+ - String: ' because its definition is unavailable'
+...
+--- !Analysis
+Pass: inline
+Name: CanBeInlined
+DebugLoc: { File: Inputs/dm.c, Line: 7, Column: 3 }
+Function: quack
+Args:
+ - Callee: foo
+ DebugLoc: { File: Inputs/dm.c, Line: 2, Column: 0 }
+ - String: ' can be inlined into '
+ - Caller: quack
+ DebugLoc: { File: Inputs/dm.c, Line: 6, Column: 0 }
+ - String: ' with cost='
+ - Cost: '-35'
+ - String: ' (threshold='
+ - Threshold: '375'
+ - String: ')'
+...
+--- !Passed
+Pass: inline
+Name: Inlined
+DebugLoc: { File: Inputs/dm.c, Line: 7, Column: 3 }
+Function: quack
+Args:
+ - Callee: foo
+ DebugLoc: { File: Inputs/dm.c, Line: 2, Column: 0 }
+ - String: ' inlined into '
+ - Caller: quack
+ DebugLoc: { File: Inputs/dm.c, Line: 6, Column: 0 }
+...
+--- !Analysis
+Pass: inline
+Name: CanBeInlined
+DebugLoc: { File: Inputs/dm.c, Line: 11, Column: 3 }
+Function: quack2
+Args:
+ - Callee: foo
+ DebugLoc: { File: Inputs/dm.c, Line: 2, Column: 0 }
+ - String: ' can be inlined into '
+ - Caller: quack2
+ DebugLoc: { File: Inputs/dm.c, Line: 10, Column: 0 }
+ - String: ' with cost='
+ - Cost: '-5'
+ - String: ' (threshold='
+ - Threshold: '375'
+ - String: ')'
+...
+--- !Passed
+Pass: inline
+Name: Inlined
+DebugLoc: { File: Inputs/dm.c, Line: 11, Column: 3 }
+Function: quack2
+Args:
+ - Callee: foo
+ DebugLoc: { File: Inputs/dm.c, Line: 2, Column: 0 }
+ - String: ' inlined into '
+ - Caller: quack2
+ DebugLoc: { File: Inputs/dm.c, Line: 10, Column: 0 }
+...
+--- !Analysis
+Pass: loop-vectorize
+Name: CFGNotUnderstood
+DebugLoc: { File: Inputs/dm.c, Line: 3, Column: 28 }
+Function: foo
+Args:
+ - String: 'loop not vectorized: '
+ - String: loop control flow is not understood by vectorizer
+...
+--- !Missed
+Pass: loop-vectorize
+Name: MissedDetails
+DebugLoc: { File: Inputs/dm.c, Line: 3, Column: 28 }
+Function: foo
+Args:
+ - String: loop not vectorized
+...
+--- !Analysis
+Pass: loop-vectorize
+Name: CFGNotUnderstood
+DebugLoc: { File: Inputs/dm.c, Line: 3, Column: 28 }
+Function: quack
+Args:
+ - String: 'loop not vectorized: '
+ - String: loop control flow is not understood by vectorizer
+...
+--- !Missed
+Pass: loop-vectorize
+Name: MissedDetails
+DebugLoc: { File: Inputs/dm.c, Line: 3, Column: 28 }
+Function: quack
+Args:
+ - String: loop not vectorized
+...
diff --git a/test/tools/llvm-opt-report/func-dm.test b/test/tools/llvm-opt-report/func-dm.test
new file mode 100644
index 0000000000000..133386e2b15b7
--- /dev/null
+++ b/test/tools/llvm-opt-report/func-dm.test
@@ -0,0 +1,17 @@
+RUN: llvm-opt-report -r %p %p/Inputs/dm.yaml | FileCheck -strict-whitespace %s
+
+; CHECK: < {{.*[/\]}}dm.c
+; CHECK-NEXT: 1 | void bar(void);
+; CHECK-NEXT: 2 | void foo(int n) {
+; CHECK-NEXT: 3 | if (n) { bar(); } else { while (1) {} }
+; CHECK-NEXT: 4 | }
+; CHECK-NEXT: 5 |
+; CHECK-NEXT: 6 | void quack(void) {
+; CHECK-NEXT: 7 I | foo(0);
+; CHECK-NEXT: 8 | }
+; CHECK-NEXT: 9 |
+; CHECK-NEXT: 10 | void quack2(void) {
+; CHECK-NEXT: 11 I | foo(4);
+; CHECK-NEXT: 12 | }
+; CHECK-NEXT: 13 |
+