Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r--  test/CodeGen/X86/2007-01-08-InstrSched.ll | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/add-scalar.ll | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/add-vec.ll | 111
-rw-r--r--  test/CodeGen/X86/GlobalISel/binop.ll | 8
-rw-r--r--  test/CodeGen/X86/GlobalISel/br.ll | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/callingconv.ll | 8
-rw-r--r--  test/CodeGen/X86/GlobalISel/cmp.ll | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/constant.ll | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/ext-x86-64.ll | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/ext.ll | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/frameIndex.ll | 12
-rw-r--r--  test/CodeGen/X86/GlobalISel/gep.ll | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-add-v128.mir | 119
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-add-v256.mir | 157
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-add-v512.mir | 139
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir | 119
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir | 120
-rw-r--r--  test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir | 120
-rw-r--r--  test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/memop-scalar.ll | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/memop-vec.ll | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/mul-scalar.ll | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/mul-vec.ll | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir | 55
-rw-r--r--  test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir | 54
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-add-v128.mir | 195
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-add-v256.mir | 185
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-add-v512.mir | 130
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-br.mir | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-cmp.mir | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-constant.mir | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-ext.mir | 4
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-frameIndex.mir | 6
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-gep.mir | 2
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-sub-v128.mir | 195
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-sub-v256.mir | 185
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-sub-v512.mir | 130
-rw-r--r--  test/CodeGen/X86/GlobalISel/select-trunc.mir | 92
-rw-r--r--  test/CodeGen/X86/GlobalISel/sub-vec.ll | 111
-rw-r--r--  test/CodeGen/X86/GlobalISel/trunc.ll | 2
-rw-r--r--  test/CodeGen/X86/O0-pipeline.ll | 2
-rw-r--r--  test/CodeGen/X86/addcarry.ll | 20
-rw-r--r--  test/CodeGen/X86/avg.ll | 833
-rw-r--r--  test/CodeGen/X86/avx-intrinsics-fast-isel.ll | 52
-rw-r--r--  test/CodeGen/X86/avx.ll | 2
-rw-r--r--  test/CodeGen/X86/avx512-cmp-kor-sequence.ll | 6
-rw-r--r--  test/CodeGen/X86/avx512-cmp.ll | 81
-rw-r--r--  test/CodeGen/X86/avx512-cvt.ll | 4
-rw-r--r--  test/CodeGen/X86/avx512-ext.ll | 24
-rw-r--r--  test/CodeGen/X86/avx512-fsel.ll | 24
-rw-r--r--  test/CodeGen/X86/avx512-gather-scatter-intrin.ll | 10
-rwxr-xr-x  test/CodeGen/X86/avx512-i1test.ll | 5
-rw-r--r--  test/CodeGen/X86/avx512-insert-extract.ll | 220
-rw-r--r--  test/CodeGen/X86/avx512-insert-extract_i1.ll | 5
-rw-r--r--  test/CodeGen/X86/avx512-intrinsics-upgrade.ll | 116
-rw-r--r--  test/CodeGen/X86/avx512-intrinsics.ll | 422
-rw-r--r--  test/CodeGen/X86/avx512-load-store.ll | 8
-rwxr-xr-x  test/CodeGen/X86/avx512-mask-bugfix.ll | 57
-rw-r--r--  test/CodeGen/X86/avx512-mask-op.ll | 151
-rw-r--r--  test/CodeGen/X86/avx512-mask-spills.ll | 40
-rw-r--r--  test/CodeGen/X86/avx512-memfold.ll | 5
-rw-r--r--  test/CodeGen/X86/avx512-regcall-NoMask.ll | 32
-rw-r--r--  test/CodeGen/X86/avx512-scalar_mask.ll | 14
-rw-r--r--  test/CodeGen/X86/avx512-select.ll | 4
-rw-r--r--  test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll | 12
-rw-r--r--  test/CodeGen/X86/avx512bw-intrinsics.ll | 16
-rw-r--r--  test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll | 24
-rw-r--r--  test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll | 2
-rw-r--r--  test/CodeGen/X86/avx512cdvl-intrinsics.ll | 2
-rw-r--r--  test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll | 9
-rw-r--r--  test/CodeGen/X86/avx512dq-intrinsics.ll | 14
-rw-r--r--  test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll | 10
-rw-r--r--  test/CodeGen/X86/avx512dqvl-intrinsics.ll | 4
-rw-r--r--  test/CodeGen/X86/avx512er-intrinsics.ll | 6
-rw-r--r--  test/CodeGen/X86/avx512ifma-intrinsics.ll | 8
-rw-r--r--  test/CodeGen/X86/avx512ifmavl-intrinsics.ll | 16
-rw-r--r--  test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll | 64
-rw-r--r--  test/CodeGen/X86/avx512vl-intrinsics.ll | 28
-rw-r--r--  test/CodeGen/X86/bitcast-setcc-128.ll | 823
-rw-r--r--  test/CodeGen/X86/bitcast-setcc-256.ll | 363
-rw-r--r--  test/CodeGen/X86/bswap_tree2.ll | 35
-rw-r--r--  test/CodeGen/X86/constant-combines.ll | 16
-rw-r--r--  test/CodeGen/X86/fast-isel-load-i1.ll | 4
-rw-r--r--  test/CodeGen/X86/fma-fneg-combine.ll | 5
-rw-r--r--  test/CodeGen/X86/fmsubadd-combine.ll | 193
-rw-r--r--  test/CodeGen/X86/fold-tied-op.ll | 7
-rw-r--r--  test/CodeGen/X86/fp128-i128.ll | 2
-rw-r--r--  test/CodeGen/X86/haddsub-2.ll | 12
-rw-r--r--  test/CodeGen/X86/leaFixup32.mir | 509
-rw-r--r--  test/CodeGen/X86/leaFixup64.mir | 1041
-rw-r--r--  test/CodeGen/X86/lrshrink.ll | 57
-rw-r--r--  test/CodeGen/X86/madd.ll | 34
-rw-r--r--  test/CodeGen/X86/masked_gather_scatter.ll | 34
-rw-r--r--  test/CodeGen/X86/merge-consecutive-loads-128.ll | 16
-rw-r--r--  test/CodeGen/X86/misched-matrix.ll | 4
-rw-r--r--  test/CodeGen/X86/mul-i1024.ll | 3827
-rw-r--r--  test/CodeGen/X86/mul-i256.ll | 94
-rw-r--r--  test/CodeGen/X86/mul-i512.ll | 705
-rw-r--r--  test/CodeGen/X86/oddshuffles.ll | 34
-rw-r--r--  test/CodeGen/X86/overflow.ll | 8
-rw-r--r--  test/CodeGen/X86/pmul.ll | 55
-rw-r--r--  test/CodeGen/X86/pr27591.ll | 18
-rw-r--r--  test/CodeGen/X86/pr28173.ll | 20
-rw-r--r--  test/CodeGen/X86/pr29112.ll | 8
-rw-r--r--  test/CodeGen/X86/pr31088.ll | 2
-rw-r--r--  test/CodeGen/X86/pr32241.ll | 68
-rw-r--r--  test/CodeGen/X86/pr32256.ll | 36
-rw-r--r--  test/CodeGen/X86/pr32284.ll | 12
-rw-r--r--  test/CodeGen/X86/pr32451.ll | 6
-rw-r--r--  test/CodeGen/X86/rotate.ll | 16
-rw-r--r--  test/CodeGen/X86/rtm.ll | 10
-rw-r--r--  test/CodeGen/X86/sad.ll | 929
-rw-r--r--  test/CodeGen/X86/select.ll | 28
-rw-r--r--  test/CodeGen/X86/setcc-wide-types.ll | 56
-rw-r--r--  test/CodeGen/X86/shrink_vmul_sse.ll | 2
-rw-r--r--  test/CodeGen/X86/sse-intrinsics-fast-isel.ll | 10
-rw-r--r--  test/CodeGen/X86/sse-scalar-fp-arith.ll | 8
-rw-r--r--  test/CodeGen/X86/sse1.ll | 8
-rw-r--r--  test/CodeGen/X86/sse3-avx-addsub-2.ll | 14
-rw-r--r--  test/CodeGen/X86/sse41.ll | 8
-rw-r--r--  test/CodeGen/X86/subcarry.ll | 137
-rw-r--r--  test/CodeGen/X86/vec_int_to_fp.ll | 84
-rw-r--r--  test/CodeGen/X86/vector-bitreverse.ll | 6
-rw-r--r--  test/CodeGen/X86/vector-blend.ll | 4
-rw-r--r--  test/CodeGen/X86/vector-sqrt.ll | 8
-rw-r--r--  test/CodeGen/X86/x86-interleaved-access.ll | 14
-rw-r--r--  test/CodeGen/X86/xmulo.ll | 8
-rw-r--r--  test/CodeGen/X86/xor-select-i1-combine.ll | 6
129 files changed, 9381 insertions, 4684 deletions
diff --git a/test/CodeGen/X86/2007-01-08-InstrSched.ll b/test/CodeGen/X86/2007-01-08-InstrSched.ll
index 24aa5b98d0bb8..4ec703921e29f 100644
--- a/test/CodeGen/X86/2007-01-08-InstrSched.ll
+++ b/test/CodeGen/X86/2007-01-08-InstrSched.ll
@@ -13,10 +13,10 @@ define float @foo(float %x) nounwind {
; CHECK: mulss
; CHECK: mulss
-; CHECK: addss
; CHECK: mulss
-; CHECK: addss
; CHECK: mulss
; CHECK: addss
+; CHECK: addss
+; CHECK: addss
; CHECK: ret
}
diff --git a/test/CodeGen/X86/GlobalISel/add-scalar.ll b/test/CodeGen/X86/GlobalISel/add-scalar.ll
index 85db1c0e7e7a2..55c825464039c 100644
--- a/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
-; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
; X64-LABEL: test_add_i64:
diff --git a/test/CodeGen/X86/GlobalISel/add-vec.ll b/test/CodeGen/X86/GlobalISel/add-vec.ll
new file mode 100644
index 0000000000000..679a49d733a2f
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/add-vec.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=SKX
+
+define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
+; SKX-LABEL: test_add_v16i8:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = add <16 x i8> %arg1, %arg2
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
+; SKX-LABEL: test_add_v8i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = add <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+; SKX-LABEL: test_add_v4i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = add <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
+; SKX-LABEL: test_add_v2i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = add <2 x i64> %arg1, %arg2
+ ret <2 x i64> %ret
+}
+
+define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
+; SKX-LABEL: test_add_v32i8:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = add <32 x i8> %arg1, %arg2
+ ret <32 x i8> %ret
+}
+
+define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
+; SKX-LABEL: test_add_v16i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = add <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+}
+
+define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
+; SKX-LABEL: test_add_v8i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = add <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+}
+
+define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
+; SKX-LABEL: test_add_v4i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = add <4 x i64> %arg1, %arg2
+ ret <4 x i64> %ret
+}
+
+define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
+; SKX-LABEL: test_add_v64i8:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = add <64 x i8> %arg1, %arg2
+ ret <64 x i8> %ret
+}
+
+define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
+; SKX-LABEL: test_add_v32i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddw %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = add <32 x i16> %arg1, %arg2
+ ret <32 x i16> %ret
+}
+
+define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
+; SKX-LABEL: test_add_v16i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = add <16 x i32> %arg1, %arg2
+ ret <16 x i32> %ret
+}
+
+define <8 x i64> @test_add_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
+; SKX-LABEL: test_add_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = add <8 x i64> %arg1, %arg2
+ ret <8 x i64> %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/binop.ll b/test/CodeGen/X86/GlobalISel/binop.ll
index 1aae1db8ab076..d7ae4435682f0 100644
--- a/test/CodeGen/X86/GlobalISel/binop.ll
+++ b/test/CodeGen/X86/GlobalISel/binop.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL
define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
; ALL-LABEL: test_sub_i64:
diff --git a/test/CodeGen/X86/GlobalISel/br.ll b/test/CodeGen/X86/GlobalISel/br.ll
index faa6a03503373..387e8797f0cd0 100644
--- a/test/CodeGen/X86/GlobalISel/br.ll
+++ b/test/CodeGen/X86/GlobalISel/br.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 -mtriple=x86_64-linux-gnu -global-isel %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+; RUN: llc -O0 -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
define void @uncondbr() {
; CHECK-LABEL: uncondbr:
diff --git a/test/CodeGen/X86/GlobalISel/callingconv.ll b/test/CodeGen/X86/GlobalISel/callingconv.ll
index c7e4d91ac3c7b..997115d4d900e 100644
--- a/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_GISEL
-; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_ISEL
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_GISEL
-; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_ISEL
+; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_GISEL
+; RUN: llc -mtriple=i386-linux-gnu -mattr=+sse2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 --check-prefix=X32_ISEL
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_GISEL
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_ISEL
define i32 @test_ret_i32() {
; X32-LABEL: test_ret_i32:
diff --git a/test/CodeGen/X86/GlobalISel/cmp.ll b/test/CodeGen/X86/GlobalISel/cmp.ll
index 03692bb6b1de9..39fee409d785a 100644
--- a/test/CodeGen/X86/GlobalISel/cmp.ll
+++ b/test/CodeGen/X86/GlobalISel/cmp.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL
define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
; ALL-LABEL: test_icmp_eq_i8:
diff --git a/test/CodeGen/X86/GlobalISel/constant.ll b/test/CodeGen/X86/GlobalISel/constant.ll
index cab043a51f052..b550bb0bc7be6 100644
--- a/test/CodeGen/X86/GlobalISel/constant.ll
+++ b/test/CodeGen/X86/GlobalISel/constant.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
define i8 @const_i8() {
; ALL-LABEL: const_i8:
diff --git a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
index 64cd0e70a4fdd..b08ac062fb4bb 100644
--- a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
+++ b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
; TODO merge with ext.ll after i64 sext suported on 32bit platform
diff --git a/test/CodeGen/X86/GlobalISel/ext.ll b/test/CodeGen/X86/GlobalISel/ext.ll
index 4d4e3b05ca28b..27aecd118b385 100644
--- a/test/CodeGen/X86/GlobalISel/ext.ll
+++ b/test/CodeGen/X86/GlobalISel/ext.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32
define i32 @test_zext_i1(i32 %a) {
; X64-LABEL: test_zext_i1:
diff --git a/test/CodeGen/X86/GlobalISel/frameIndex.ll b/test/CodeGen/X86/GlobalISel/frameIndex.ll
index 2bb11adcc3b56..a9ec94defea87 100644
--- a/test/CodeGen/X86/GlobalISel/frameIndex.ll
+++ b/test/CodeGen/X86/GlobalISel/frameIndex.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=X32
-; RUN: llc -mtriple=i386-linux-gnu < %s -o - | FileCheck %s --check-prefix=X32
-; RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel < %s -o - | FileCheck %s --check-prefix=X32ABI
-; RUN: llc -mtriple=x86_64-linux-gnux32 < %s -o - | FileCheck %s --check-prefix=X32ABI
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32
+; RUN: llc -mtriple=i386-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32ABI
+; RUN: llc -mtriple=x86_64-linux-gnux32 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X32ABI
define i32* @allocai32() {
; X64-LABEL: allocai32:
diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll
index bc5b0152b24ae..94da9fb46761b 100644
--- a/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/test/CodeGen/X86/GlobalISel/gep.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL
-; RUN: llc -mtriple=x86_64-linux-gnu < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL
+; RUN: llc -mtriple=x86_64-linux-gnu -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
; X64_GISEL-LABEL: test_gep_i8:
diff --git a/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir b/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
new file mode 100644
index 0000000000000..feba33ac91be3
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
+
+--- |
+ define void @test_add_v16i8() {
+ %ret = add <16 x i8> undef, undef
+ ret void
+ }
+
+ define void @test_add_v8i16() {
+ %ret = add <8 x i16> undef, undef
+ ret void
+ }
+
+ define void @test_add_v4i32() {
+ %ret = add <4 x i32> undef, undef
+ ret void
+ }
+
+ define void @test_add_v2i64() {
+ %ret = add <2 x i64> undef, undef
+ ret void
+ }
+...
+---
+name: test_add_v16i8
+# ALL-LABEL: name: test_add_v16i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<16 x s8>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<16 x s8>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<16 x s8>) = G_ADD %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<16 x s8>) = IMPLICIT_DEF
+ %1(<16 x s8>) = IMPLICIT_DEF
+ %2(<16 x s8>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v8i16
+# ALL-LABEL: name: test_add_v8i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<8 x s16>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<8 x s16>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<8 x s16>) = G_ADD %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = IMPLICIT_DEF
+ %1(<8 x s16>) = IMPLICIT_DEF
+ %2(<8 x s16>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v4i32
+# ALL-LABEL: name: test_add_v4i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<4 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<4 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<4 x s32>) = G_ADD %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = IMPLICIT_DEF
+ %1(<4 x s32>) = IMPLICIT_DEF
+ %2(<4 x s32>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v2i64
+# ALL-LABEL: name: test_add_v2i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<2 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<2 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<2 x s64>) = G_ADD %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<2 x s64>) = IMPLICIT_DEF
+ %1(<2 x s64>) = IMPLICIT_DEF
+ %2(<2 x s64>) = G_ADD %0, %1
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir b/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
new file mode 100644
index 0000000000000..f7dc8031b4f5b
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
@@ -0,0 +1,157 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+
+--- |
+ define void @test_add_v32i8() {
+ %ret = add <32 x i8> undef, undef
+ ret void
+ }
+
+ define void @test_add_v16i16() {
+ %ret = add <16 x i16> undef, undef
+ ret void
+ }
+
+ define void @test_add_v8i32() {
+ %ret = add <8 x i32> undef, undef
+ ret void
+ }
+
+ define void @test_add_v4i64() {
+ %ret = add <4 x i64> undef, undef
+ ret void
+ }
+
+...
+---
+name: test_add_v32i8
+# ALL-LABEL: name: test_add_v32i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX1: %0(<32 x s8>) = IMPLICIT_DEF
+# AVX1-NEXT: %1(<32 x s8>) = IMPLICIT_DEF
+# AVX1-NEXT: %3(<16 x s8>), %4(<16 x s8>) = G_UNMERGE_VALUES %0(<32 x s8>)
+# AVX1-NEXT: %5(<16 x s8>), %6(<16 x s8>) = G_UNMERGE_VALUES %1(<32 x s8>)
+# AVX1-NEXT: %7(<16 x s8>) = G_ADD %3, %5
+# AVX1-NEXT: %8(<16 x s8>) = G_ADD %4, %6
+# AVX1-NEXT: %2(<32 x s8>) = G_MERGE_VALUES %7(<16 x s8>), %8(<16 x s8>)
+# AVX1-NEXT: RET 0
+#
+# AVX2: %0(<32 x s8>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<32 x s8>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<32 x s8>) = G_ADD %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<32 x s8>) = IMPLICIT_DEF
+ %1(<32 x s8>) = IMPLICIT_DEF
+ %2(<32 x s8>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v16i16
+# ALL-LABEL: name: test_add_v16i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX1: %0(<16 x s16>) = IMPLICIT_DEF
+# AVX1-NEXT: %1(<16 x s16>) = IMPLICIT_DEF
+# AVX1-NEXT: %3(<8 x s16>), %4(<8 x s16>) = G_UNMERGE_VALUES %0(<16 x s16>)
+# AVX1-NEXT: %5(<8 x s16>), %6(<8 x s16>) = G_UNMERGE_VALUES %1(<16 x s16>)
+# AVX1-NEXT: %7(<8 x s16>) = G_ADD %3, %5
+# AVX1-NEXT: %8(<8 x s16>) = G_ADD %4, %6
+# AVX1-NEXT: %2(<16 x s16>) = G_MERGE_VALUES %7(<8 x s16>), %8(<8 x s16>)
+# AVX1-NEXT: RET 0
+#
+# AVX2: %0(<16 x s16>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<16 x s16>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<16 x s16>) = G_ADD %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<16 x s16>) = IMPLICIT_DEF
+ %1(<16 x s16>) = IMPLICIT_DEF
+ %2(<16 x s16>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v8i32
+# ALL-LABEL: name: test_add_v8i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX1: %0(<8 x s32>) = IMPLICIT_DEF
+# AVX1-NEXT: %1(<8 x s32>) = IMPLICIT_DEF
+# AVX1-NEXT: %3(<4 x s32>), %4(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)
+# AVX1-NEXT: %5(<4 x s32>), %6(<4 x s32>) = G_UNMERGE_VALUES %1(<8 x s32>)
+# AVX1-NEXT: %7(<4 x s32>) = G_ADD %3, %5
+# AVX1-NEXT: %8(<4 x s32>) = G_ADD %4, %6
+# AVX1-NEXT: %2(<8 x s32>) = G_MERGE_VALUES %7(<4 x s32>), %8(<4 x s32>)
+# AVX1-NEXT: RET 0
+#
+# AVX2: %0(<8 x s32>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<8 x s32>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<8 x s32>) = G_ADD %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>) = IMPLICIT_DEF
+ %2(<8 x s32>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v4i64
+# ALL-LABEL: name: test_add_v4i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX1: %0(<4 x s64>) = IMPLICIT_DEF
+# AVX1-NEXT: %1(<4 x s64>) = IMPLICIT_DEF
+# AVX1-NEXT: %3(<2 x s64>), %4(<2 x s64>) = G_UNMERGE_VALUES %0(<4 x s64>)
+# AVX1-NEXT: %5(<2 x s64>), %6(<2 x s64>) = G_UNMERGE_VALUES %1(<4 x s64>)
+# AVX1-NEXT: %7(<2 x s64>) = G_ADD %3, %5
+# AVX1-NEXT: %8(<2 x s64>) = G_ADD %4, %6
+# AVX1-NEXT: %2(<4 x s64>) = G_MERGE_VALUES %7(<2 x s64>), %8(<2 x s64>)
+# AVX1-NEXT: RET 0
+#
+# AVX2: %0(<4 x s64>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<4 x s64>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<4 x s64>) = G_ADD %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<4 x s64>) = IMPLICIT_DEF
+ %1(<4 x s64>) = IMPLICIT_DEF
+ %2(<4 x s64>) = G_ADD %0, %1
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir b/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir
new file mode 100644
index 0000000000000..2b8b51acaa55a
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir
@@ -0,0 +1,139 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
+
+--- |
+ define void @test_add_v64i8() {
+ %ret = add <64 x i8> undef, undef
+ ret void
+ }
+
+ define void @test_add_v32i16() {
+ %ret = add <32 x i16> undef, undef
+ ret void
+ }
+
+ define void @test_add_v16i32() {
+ %ret = add <16 x i32> undef, undef
+ ret void
+ }
+
+ define void @test_add_v8i64() {
+ %ret = add <8 x i64> undef, undef
+ ret void
+ }
+
+...
+---
+name: test_add_v64i8
+# ALL-LABEL: name: test_add_v64i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX512F: %0(<64 x s8>) = IMPLICIT_DEF
+# AVX512F-NEXT: %1(<64 x s8>) = IMPLICIT_DEF
+# AVX512F-NEXT: %3(<32 x s8>), %4(<32 x s8>) = G_UNMERGE_VALUES %0(<64 x s8>)
+# AVX512F-NEXT: %5(<32 x s8>), %6(<32 x s8>) = G_UNMERGE_VALUES %1(<64 x s8>)
+# AVX512F-NEXT: %7(<32 x s8>) = G_ADD %3, %5
+# AVX512F-NEXT: %8(<32 x s8>) = G_ADD %4, %6
+# AVX512F-NEXT: %2(<64 x s8>) = G_MERGE_VALUES %7(<32 x s8>), %8(<32 x s8>)
+# AVX512F-NEXT: RET 0
+#
+# AVX512BW: %0(<64 x s8>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %1(<64 x s8>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %2(<64 x s8>) = G_ADD %0, %1
+# AVX512BW-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<64 x s8>) = IMPLICIT_DEF
+ %1(<64 x s8>) = IMPLICIT_DEF
+ %2(<64 x s8>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v32i16
+# ALL-LABEL: name: test_add_v32i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX512F: %0(<32 x s16>) = IMPLICIT_DEF
+# AVX512F-NEXT: %1(<32 x s16>) = IMPLICIT_DEF
+# AVX512F-NEXT: %3(<16 x s16>), %4(<16 x s16>) = G_UNMERGE_VALUES %0(<32 x s16>)
+# AVX512F-NEXT: %5(<16 x s16>), %6(<16 x s16>) = G_UNMERGE_VALUES %1(<32 x s16>)
+# AVX512F-NEXT: %7(<16 x s16>) = G_ADD %3, %5
+# AVX512F-NEXT: %8(<16 x s16>) = G_ADD %4, %6
+# AVX512F-NEXT: %2(<32 x s16>) = G_MERGE_VALUES %7(<16 x s16>), %8(<16 x s16>)
+# AVX512F-NEXT: RET 0
+#
+# AVX512BW: %0(<32 x s16>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %1(<32 x s16>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %2(<32 x s16>) = G_ADD %0, %1
+# AVX512BW-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<32 x s16>) = IMPLICIT_DEF
+ %1(<32 x s16>) = IMPLICIT_DEF
+ %2(<32 x s16>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v16i32
+# ALL-LABEL: name: test_add_v16i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<16 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<16 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<16 x s32>) = G_ADD %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<16 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = IMPLICIT_DEF
+ %2(<16 x s32>) = G_ADD %0, %1
+ RET 0
+
+...
+---
+name: test_add_v8i64
+# ALL-LABEL: name: test_add_v8i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<8 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<8 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<8 x s64>) = G_ADD %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<8 x s64>) = IMPLICIT_DEF
+ %1(<8 x s64>) = IMPLICIT_DEF
+ %2(<8 x s64>) = G_ADD %0, %1
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir b/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
new file mode 100644
index 0000000000000..2f90fc9a3c906
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
@@ -0,0 +1,119 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
+
+--- |
+ define void @test_sub_v16i8() {
+ %ret = sub <16 x i8> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v8i16() {
+ %ret = sub <8 x i16> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v4i32() {
+ %ret = sub <4 x i32> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v2i64() {
+ %ret = sub <2 x i64> undef, undef
+ ret void
+ }
+...
+---
+name: test_sub_v16i8
+# ALL-LABEL: name: test_sub_v16i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<16 x s8>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<16 x s8>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<16 x s8>) = G_SUB %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<16 x s8>) = IMPLICIT_DEF
+ %1(<16 x s8>) = IMPLICIT_DEF
+ %2(<16 x s8>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v8i16
+# ALL-LABEL: name: test_sub_v8i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<8 x s16>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<8 x s16>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<8 x s16>) = G_SUB %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = IMPLICIT_DEF
+ %1(<8 x s16>) = IMPLICIT_DEF
+ %2(<8 x s16>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v4i32
+# ALL-LABEL: name: test_sub_v4i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<4 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<4 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<4 x s32>) = G_SUB %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = IMPLICIT_DEF
+ %1(<4 x s32>) = IMPLICIT_DEF
+ %2(<4 x s32>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v2i64
+# ALL-LABEL: name: test_sub_v2i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<2 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<2 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<2 x s64>) = G_SUB %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<2 x s64>) = IMPLICIT_DEF
+ %1(<2 x s64>) = IMPLICIT_DEF
+ %2(<2 x s64>) = G_SUB %0, %1
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir b/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
new file mode 100644
index 0000000000000..9d07787b8ecb2
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
@@ -0,0 +1,120 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+# TODO: add tests for additional configuration after the legalization supported
+--- |
+ define void @test_sub_v32i8() {
+ %ret = sub <32 x i8> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v16i16() {
+ %ret = sub <16 x i16> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v8i32() {
+ %ret = sub <8 x i32> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v4i64() {
+ %ret = sub <4 x i64> undef, undef
+ ret void
+ }
+
+...
+---
+name: test_sub_v32i8
+# ALL-LABEL: name: test_sub_v32i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX2: %0(<32 x s8>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<32 x s8>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<32 x s8>) = G_SUB %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<32 x s8>) = IMPLICIT_DEF
+ %1(<32 x s8>) = IMPLICIT_DEF
+ %2(<32 x s8>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v16i16
+# ALL-LABEL: name: test_sub_v16i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX2: %0(<16 x s16>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<16 x s16>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<16 x s16>) = G_SUB %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<16 x s16>) = IMPLICIT_DEF
+ %1(<16 x s16>) = IMPLICIT_DEF
+ %2(<16 x s16>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v8i32
+# ALL-LABEL: name: test_sub_v8i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX2: %0(<8 x s32>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<8 x s32>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<8 x s32>) = G_SUB %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>) = IMPLICIT_DEF
+ %2(<8 x s32>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v4i64
+# ALL-LABEL: name: test_sub_v4i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX2: %0(<4 x s64>) = IMPLICIT_DEF
+# AVX2-NEXT: %1(<4 x s64>) = IMPLICIT_DEF
+# AVX2-NEXT: %2(<4 x s64>) = G_SUB %0, %1
+# AVX2-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<4 x s64>) = IMPLICIT_DEF
+ %1(<4 x s64>) = IMPLICIT_DEF
+ %2(<4 x s64>) = G_SUB %0, %1
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir b/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
new file mode 100644
index 0000000000000..c88e074ca4131
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
@@ -0,0 +1,120 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -global-isel -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
+# TODO: add tests for additional configuration after the legalization supported
+--- |
+ define void @test_sub_v64i8() {
+ %ret = sub <64 x i8> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v32i16() {
+ %ret = sub <32 x i16> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v16i32() {
+ %ret = sub <16 x i32> undef, undef
+ ret void
+ }
+
+ define void @test_sub_v8i64() {
+ %ret = sub <8 x i64> undef, undef
+ ret void
+ }
+
+...
+---
+name: test_sub_v64i8
+# ALL-LABEL: name: test_sub_v64i8
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX512BW: %0(<64 x s8>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %1(<64 x s8>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %2(<64 x s8>) = G_SUB %0, %1
+# AVX512BW-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<64 x s8>) = IMPLICIT_DEF
+ %1(<64 x s8>) = IMPLICIT_DEF
+ %2(<64 x s8>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v32i16
+# ALL-LABEL: name: test_sub_v32i16
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# AVX512BW: %0(<32 x s16>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %1(<32 x s16>) = IMPLICIT_DEF
+# AVX512BW-NEXT: %2(<32 x s16>) = G_SUB %0, %1
+# AVX512BW-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<32 x s16>) = IMPLICIT_DEF
+ %1(<32 x s16>) = IMPLICIT_DEF
+ %2(<32 x s16>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v16i32
+# ALL-LABEL: name: test_sub_v16i32
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<16 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<16 x s32>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<16 x s32>) = G_SUB %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<16 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = IMPLICIT_DEF
+ %2(<16 x s32>) = G_SUB %0, %1
+ RET 0
+
+...
+---
+name: test_sub_v8i64
+# ALL-LABEL: name: test_sub_v8i64
+alignment: 4
+legalized: false
+regBankSelected: false
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+# ALL: %0(<8 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %1(<8 x s64>) = IMPLICIT_DEF
+# ALL-NEXT: %2(<8 x s64>) = G_SUB %0, %1
+# ALL-NEXT: RET 0
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<8 x s64>) = IMPLICIT_DEF
+ %1(<8 x s64>) = IMPLICIT_DEF
+ %2(<8 x s64>) = G_SUB %0, %1
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
index 49a7fd79f8b24..5df52c5a058b5 100644
--- a/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i386-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_FAST
-; RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_GREEDY
+; RUN: llc -mtriple=i386-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_FAST
+; RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_GREEDY
;TODO merge with x86-64 tests (many operations not suppored yet)
diff --git a/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index 3e45a9c9a49dd..d3d4b297a8029 100644
--- a/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST
-; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST
+; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
diff --git a/test/CodeGen/X86/GlobalISel/memop-vec.ll b/test/CodeGen/X86/GlobalISel/memop-vec.ll
index e218fded4d5f7..f1ffc15f4d031 100644
--- a/test/CodeGen/X86/GlobalISel/memop-vec.ll
+++ b/test/CodeGen/X86/GlobalISel/memop-vec.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX
-; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -regbankselect-greedy -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SKX
define <4 x i32> @test_load_v4i32_noalign(<4 x i32> * %p1) {
; ALL-LABEL: test_load_v4i32_noalign:
diff --git a/test/CodeGen/X86/GlobalISel/mul-scalar.ll b/test/CodeGen/X86/GlobalISel/mul-scalar.ll
index 529e81c43304b..450c3839797cd 100644
--- a/test/CodeGen/X86/GlobalISel/mul-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/mul-scalar.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
;TODO: instruction selection not supported yet
;define i8 @test_mul_i8(i8 %arg1, i8 %arg2) {
diff --git a/test/CodeGen/X86/GlobalISel/mul-vec.ll b/test/CodeGen/X86/GlobalISel/mul-vec.ll
index 83615a718528f..b2e211470f393 100644
--- a/test/CodeGen/X86/GlobalISel/mul-vec.ll
+++ b/test/CodeGen/X86/GlobalISel/mul-vec.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel < %s -o - | FileCheck %s --check-prefix=SKX
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=SKX
define <8 x i16> @test_mul_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
; SKX-LABEL: test_mul_v8i16:
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
index 446db56b992c5..f925c836f3d1f 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
@@ -5,6 +5,15 @@
define void @test_mul_vec256() {
ret void
}
+
+ define void @test_add_vec256() {
+ ret void
+ }
+
+ define void @test_sub_vec256() {
+ ret void
+ }
+
...
---
name: test_mul_vec256
@@ -29,3 +38,49 @@ body: |
RET 0
...
+---
+name: test_add_vec256
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_vec256
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>) = G_ADD %0, %0
+ RET 0
+
+...
+---
+name: test_sub_vec256
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_sub_vec256
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<8 x s32>) = IMPLICIT_DEF
+ %1(<8 x s32>) = G_SUB %0, %0
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
index f824ee12dcfb8..e0c12ff44a2f6 100644
--- a/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
+++ b/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
@@ -7,6 +7,14 @@
ret void
}
+ define void @test_add_vec512() {
+ ret void
+ }
+
+ define void @test_sub_vec512() {
+ ret void
+ }
+
...
---
name: test_mul_vec512
@@ -31,3 +39,49 @@ body: |
RET 0
...
+---
+name: test_add_vec512
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_add_vec512
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<16 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = G_ADD %0, %0
+ RET 0
+
+...
+---
+name: test_sub_vec512
+alignment: 4
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+# CHECK-LABEL: name: test_sub_vec512
+# CHECK: registers:
+# CHECK: - { id: 0, class: vecr }
+# CHECK: - { id: 1, class: vecr }
+registers:
+ - { id: 0, class: _ }
+ - { id: 1, class: _ }
+ - { id: 2, class: _ }
+body: |
+ bb.1 (%ir-block.0):
+
+ %0(<16 x s32>) = IMPLICIT_DEF
+ %1(<16 x s32>) = G_SUB %0, %0
+ RET 0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-add-v128.mir b/test/CodeGen/X86/GlobalISel/select-add-v128.mir
new file mode 100644
index 0000000000000..a39702340bc2f
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-add-v128.mir
@@ -0,0 +1,195 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=SSE2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=AVX1
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL
+
+--- |
+ define <16 x i8> @test_add_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
+ %ret = add <16 x i8> %arg1, %arg2
+ ret <16 x i8> %ret
+ }
+
+ define <8 x i16> @test_add_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
+ %ret = add <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+ }
+
+ define <4 x i32> @test_add_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+ %ret = add <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <2 x i64> @test_add_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
+ %ret = add <2 x i64> %arg1, %arg2
+ ret <2 x i64> %ret
+ }
+
+...
+---
+name: test_add_v16i8
+# ALL-LABEL: name: test_add_v16i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128 }
+# AVX512VL-NEXT: - { id: 1, class: vr128 }
+# AVX512VL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PADDBrr %0, %1
+#
+# AVX1: %2 = VPADDBrr %0, %1
+#
+# AVX512VL: %2 = VPADDBrr %0, %1
+#
+# AVX512BWVL: %2 = VPADDBZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<16 x s8>) = COPY %xmm0
+ %1(<16 x s8>) = COPY %xmm1
+ %2(<16 x s8>) = G_ADD %0, %1
+ %xmm0 = COPY %2(<16 x s8>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_v8i16
+# ALL-LABEL: name: test_add_v8i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128 }
+# AVX512VL-NEXT: - { id: 1, class: vr128 }
+# AVX512VL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PADDWrr %0, %1
+#
+# AVX1: %2 = VPADDWrr %0, %1
+#
+# AVX512VL: %2 = VPADDWrr %0, %1
+#
+# AVX512BWVL: %2 = VPADDWZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = COPY %xmm0
+ %1(<8 x s16>) = COPY %xmm1
+ %2(<8 x s16>) = G_ADD %0, %1
+ %xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_v4i32
+# ALL-LABEL: name: test_add_v4i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PADDDrr %0, %1
+#
+# AVX1: %2 = VPADDDrr %0, %1
+#
+# AVX512VL: %2 = VPADDDZ128rr %0, %1
+#
+# AVX512BWVL: %2 = VPADDDZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_ADD %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_add_v2i64
+# ALL-LABEL: name: test_add_v2i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PADDQrr %0, %1
+#
+# AVX1: %2 = VPADDQrr %0, %1
+#
+# AVX512VL: %2 = VPADDQZ128rr %0, %1
+#
+# AVX512BWVL: %2 = VPADDQZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<2 x s64>) = COPY %xmm0
+ %1(<2 x s64>) = COPY %xmm1
+ %2(<2 x s64>) = G_ADD %0, %1
+ %xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-add-v256.mir b/test/CodeGen/X86/GlobalISel/select-add-v256.mir
new file mode 100644
index 0000000000000..7556c21041241
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-add-v256.mir
@@ -0,0 +1,185 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL
+
+--- |
+ define <32 x i8> @test_add_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
+ %ret = add <32 x i8> %arg1, %arg2
+ ret <32 x i8> %ret
+ }
+
+ define <16 x i16> @test_add_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
+ %ret = add <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+ }
+
+ define <8 x i32> @test_add_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
+ %ret = add <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+ }
+
+ define <4 x i64> @test_add_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
+ %ret = add <4 x i64> %arg1, %arg2
+ ret <4 x i64> %ret
+ }
+...
+---
+name: test_add_v32i8
+# ALL-LABEL: name: test_add_v32i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256 }
+# AVX512VL-NEXT: - { id: 1, class: vr256 }
+# AVX512VL-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPADDBYrr %0, %1
+#
+# AVX512VL: %2 = VPADDBYrr %0, %1
+#
+# AVX512BWVL: %2 = VPADDBZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<32 x s8>) = COPY %ymm0
+ %1(<32 x s8>) = COPY %ymm1
+ %2(<32 x s8>) = G_ADD %0, %1
+ %ymm0 = COPY %2(<32 x s8>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_add_v16i16
+# ALL-LABEL: name: test_add_v16i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256 }
+# AVX512VL-NEXT: - { id: 1, class: vr256 }
+# AVX512VL-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPADDWYrr %0, %1
+#
+# AVX512VL: %2 = VPADDWYrr %0, %1
+#
+# AVX512BWVL: %2 = VPADDWZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<16 x s16>) = COPY %ymm0
+ %1(<16 x s16>) = COPY %ymm1
+ %2(<16 x s16>) = G_ADD %0, %1
+ %ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_add_v8i32
+# ALL-LABEL: name: test_add_v8i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256x }
+# AVX512VL-NEXT: - { id: 1, class: vr256x }
+# AVX512VL-NEXT: - { id: 2, class: vr256x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPADDDYrr %0, %1
+#
+# AVX512VL: %2 = VPADDDZ256rr %0, %1
+#
+# AVX512BWVL: %2 = VPADDDZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<8 x s32>) = COPY %ymm0
+ %1(<8 x s32>) = COPY %ymm1
+ %2(<8 x s32>) = G_ADD %0, %1
+ %ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_add_v4i64
+# ALL-LABEL: name: test_add_v4i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256x }
+# AVX512VL-NEXT: - { id: 1, class: vr256x }
+# AVX512VL-NEXT: - { id: 2, class: vr256x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPADDQYrr %0, %1
+#
+# AVX512VL: %2 = VPADDQZ256rr %0, %1
+#
+# AVX512BWVL: %2 = VPADDQZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<4 x s64>) = COPY %ymm0
+ %1(<4 x s64>) = COPY %ymm1
+ %2(<4 x s64>) = G_ADD %0, %1
+ %ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit %ymm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-add-v512.mir b/test/CodeGen/X86/GlobalISel/select-add-v512.mir
new file mode 100644
index 0000000000000..e90be4e996f83
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-add-v512.mir
@@ -0,0 +1,130 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define <64 x i8> @test_add_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) #0 {
+ %ret = add <64 x i8> %arg1, %arg2
+ ret <64 x i8> %ret
+ }
+
+ define <32 x i16> @test_add_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 {
+ %ret = add <32 x i16> %arg1, %arg2
+ ret <32 x i16> %ret
+ }
+
+ define <16 x i32> @test_add_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 {
+ %ret = add <16 x i32> %arg1, %arg2
+ ret <16 x i32> %ret
+ }
+
+ define <8 x i64> @test_add_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #1 {
+ %ret = add <8 x i64> %arg1, %arg2
+ ret <8 x i64> %ret
+ }
+
+ attributes #0 = { "target-features"="+avx512f,+avx512bw" }
+ attributes #1 = { "target-features"="+avx512f" }
+...
+---
+name: test_add_v64i8
+# ALL-LABEL: name: test_add_v64i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPADDBZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<64 x s8>) = COPY %zmm0
+ %1(<64 x s8>) = COPY %zmm1
+ %2(<64 x s8>) = G_ADD %0, %1
+ %zmm0 = COPY %2(<64 x s8>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_add_v32i16
+# ALL-LABEL: name: test_add_v32i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPADDWZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<32 x s16>) = COPY %zmm0
+ %1(<32 x s16>) = COPY %zmm1
+ %2(<32 x s16>) = G_ADD %0, %1
+ %zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_add_v16i32
+# ALL-LABEL: name: test_add_v16i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPADDDZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<16 x s32>) = COPY %zmm0
+ %1(<16 x s32>) = COPY %zmm1
+ %2(<16 x s32>) = G_ADD %0, %1
+ %zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_add_v8i64
+# ALL-LABEL: name: test_add_v8i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPADDQZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<8 x s64>) = COPY %zmm0
+ %1(<8 x s64>) = COPY %zmm1
+ %2(<8 x s64>) = G_ADD %0, %1
+ %zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit %zmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-br.mir b/test/CodeGen/X86/GlobalISel/select-br.mir
index 6d8cd2b1367dd..9d2a878e75754 100644
--- a/test/CodeGen/X86/GlobalISel/select-br.mir
+++ b/test/CodeGen/X86/GlobalISel/select-br.mir
@@ -1,5 +1,5 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
-# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32
--- |
define void @uncondbr() {
diff --git a/test/CodeGen/X86/GlobalISel/select-cmp.mir b/test/CodeGen/X86/GlobalISel/select-cmp.mir
index 1d3da6cb88b95..a92c388c1db96 100644
--- a/test/CodeGen/X86/GlobalISel/select-cmp.mir
+++ b/test/CodeGen/X86/GlobalISel/select-cmp.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
--- |
define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
diff --git a/test/CodeGen/X86/GlobalISel/select-constant.mir b/test/CodeGen/X86/GlobalISel/select-constant.mir
index f6b97b578b927..162de0264435b 100644
--- a/test/CodeGen/X86/GlobalISel/select-constant.mir
+++ b/test/CodeGen/X86/GlobalISel/select-constant.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
--- |
define i8 @const_i8() {
diff --git a/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir b/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
index 0844701487bcc..d1a3abfd0f936 100644
--- a/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
+++ b/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
--- |
define i64 @test_zext_i1(i8 %a) {
diff --git a/test/CodeGen/X86/GlobalISel/select-ext.mir b/test/CodeGen/X86/GlobalISel/select-ext.mir
index 831d6efb75f12..dccc20e571008 100644
--- a/test/CodeGen/X86/GlobalISel/select-ext.mir
+++ b/test/CodeGen/X86/GlobalISel/select-ext.mir
@@ -1,5 +1,5 @@
-# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
--- |
define i32 @test_zext_i1(i1 %a) {
diff --git a/test/CodeGen/X86/GlobalISel/select-frameIndex.mir b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir
index 2fa9ac23a7afa..1d641ba279aff 100644
--- a/test/CodeGen/X86/GlobalISel/select-frameIndex.mir
+++ b/test/CodeGen/X86/GlobalISel/select-frameIndex.mir
@@ -1,6 +1,6 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
-# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32
-# RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ABI
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+# RUN: llc -mtriple=i386-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+# RUN: llc -mtriple=x86_64-linux-gnux32 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ABI
--- |
define i32* @allocai32() {
diff --git a/test/CodeGen/X86/GlobalISel/select-gep.mir b/test/CodeGen/X86/GlobalISel/select-gep.mir
index 2c89b7057c3d2..c8a4dc80cb2cf 100644
--- a/test/CodeGen/X86/GlobalISel/select-gep.mir
+++ b/test/CodeGen/X86/GlobalISel/select-gep.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
--- |
define i32* @test_gep_i32(i32* %arr) {
diff --git a/test/CodeGen/X86/GlobalISel/select-sub-v128.mir b/test/CodeGen/X86/GlobalISel/select-sub-v128.mir
new file mode 100644
index 0000000000000..d60d4155e29df
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-sub-v128.mir
@@ -0,0 +1,195 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=SSE2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=AVX1
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL
+
+--- |
+ define <16 x i8> @test_sub_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
+ %ret = sub <16 x i8> %arg1, %arg2
+ ret <16 x i8> %ret
+ }
+
+ define <8 x i16> @test_sub_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
+ %ret = sub <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+ }
+
+ define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+ %ret = sub <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+ }
+
+ define <2 x i64> @test_sub_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
+ %ret = sub <2 x i64> %arg1, %arg2
+ ret <2 x i64> %ret
+ }
+
+...
+---
+name: test_sub_v16i8
+# ALL-LABEL: name: test_sub_v16i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128 }
+# AVX512VL-NEXT: - { id: 1, class: vr128 }
+# AVX512VL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PSUBBrr %0, %1
+#
+# AVX1: %2 = VPSUBBrr %0, %1
+#
+# AVX512VL: %2 = VPSUBBrr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBBZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<16 x s8>) = COPY %xmm0
+ %1(<16 x s8>) = COPY %xmm1
+ %2(<16 x s8>) = G_SUB %0, %1
+ %xmm0 = COPY %2(<16 x s8>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_sub_v8i16
+# ALL-LABEL: name: test_sub_v8i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128 }
+# AVX512VL-NEXT: - { id: 1, class: vr128 }
+# AVX512VL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PSUBWrr %0, %1
+#
+# AVX1: %2 = VPSUBWrr %0, %1
+#
+# AVX512VL: %2 = VPSUBWrr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBWZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<8 x s16>) = COPY %xmm0
+ %1(<8 x s16>) = COPY %xmm1
+ %2(<8 x s16>) = G_SUB %0, %1
+ %xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_sub_v4i32
+# ALL-LABEL: name: test_sub_v4i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PSUBDrr %0, %1
+#
+# AVX1: %2 = VPSUBDrr %0, %1
+#
+# AVX512VL: %2 = VPSUBDZ128rr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBDZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<4 x s32>) = COPY %xmm0
+ %1(<4 x s32>) = COPY %xmm1
+ %2(<4 x s32>) = G_SUB %0, %1
+ %xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit %xmm0
+
+...
+---
+name: test_sub_v2i64
+# ALL-LABEL: name: test_sub_v2i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# NOVL: registers:
+# NOVL-NEXT: - { id: 0, class: vr128 }
+# NOVL-NEXT: - { id: 1, class: vr128 }
+# NOVL-NEXT: - { id: 2, class: vr128 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr128x }
+# AVX512VL-NEXT: - { id: 1, class: vr128x }
+# AVX512VL-NEXT: - { id: 2, class: vr128x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr128x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr128x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# SSE2: %2 = PSUBQrr %0, %1
+#
+# AVX1: %2 = VPSUBQrr %0, %1
+#
+# AVX512VL: %2 = VPSUBQZ128rr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBQZ128rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %xmm0, %xmm1
+
+ %0(<2 x s64>) = COPY %xmm0
+ %1(<2 x s64>) = COPY %xmm1
+ %2(<2 x s64>) = G_SUB %0, %1
+ %xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit %xmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-sub-v256.mir b/test/CodeGen/X86/GlobalISel/select-sub-v256.mir
new file mode 100644
index 0000000000000..fbc44997b4a2b
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-sub-v256.mir
@@ -0,0 +1,185 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512VL
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BWVL
+
+--- |
+ define <32 x i8> @test_sub_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
+ %ret = sub <32 x i8> %arg1, %arg2
+ ret <32 x i8> %ret
+ }
+
+ define <16 x i16> @test_sub_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
+ %ret = sub <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+ }
+
+ define <8 x i32> @test_sub_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
+ %ret = sub <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+ }
+
+ define <4 x i64> @test_sub_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
+ %ret = sub <4 x i64> %arg1, %arg2
+ ret <4 x i64> %ret
+ }
+...
+---
+name: test_sub_v32i8
+# ALL-LABEL: name: test_sub_v32i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256 }
+# AVX512VL-NEXT: - { id: 1, class: vr256 }
+# AVX512VL-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPSUBBYrr %0, %1
+#
+# AVX512VL: %2 = VPSUBBYrr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBBZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<32 x s8>) = COPY %ymm0
+ %1(<32 x s8>) = COPY %ymm1
+ %2(<32 x s8>) = G_SUB %0, %1
+ %ymm0 = COPY %2(<32 x s8>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_sub_v16i16
+# ALL-LABEL: name: test_sub_v16i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256 }
+# AVX512VL-NEXT: - { id: 1, class: vr256 }
+# AVX512VL-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPSUBWYrr %0, %1
+#
+# AVX512VL: %2 = VPSUBWYrr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBWZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<16 x s16>) = COPY %ymm0
+ %1(<16 x s16>) = COPY %ymm1
+ %2(<16 x s16>) = G_SUB %0, %1
+ %ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_sub_v8i32
+# ALL-LABEL: name: test_sub_v8i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256x }
+# AVX512VL-NEXT: - { id: 1, class: vr256x }
+# AVX512VL-NEXT: - { id: 2, class: vr256x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPSUBDYrr %0, %1
+#
+# AVX512VL: %2 = VPSUBDZ256rr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBDZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<8 x s32>) = COPY %ymm0
+ %1(<8 x s32>) = COPY %ymm1
+ %2(<8 x s32>) = G_SUB %0, %1
+ %ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit %ymm0
+
+...
+---
+name: test_sub_v4i64
+# ALL-LABEL: name: test_sub_v4i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# AVX2: registers:
+# AVX2-NEXT: - { id: 0, class: vr256 }
+# AVX2-NEXT: - { id: 1, class: vr256 }
+# AVX2-NEXT: - { id: 2, class: vr256 }
+#
+# AVX512VL: registers:
+# AVX512VL-NEXT: - { id: 0, class: vr256x }
+# AVX512VL-NEXT: - { id: 1, class: vr256x }
+# AVX512VL-NEXT: - { id: 2, class: vr256x }
+#
+# AVX512BWVL: registers:
+# AVX512BWVL-NEXT: - { id: 0, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 1, class: vr256x }
+# AVX512BWVL-NEXT: - { id: 2, class: vr256x }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# AVX2: %2 = VPSUBQYrr %0, %1
+#
+# AVX512VL: %2 = VPSUBQZ256rr %0, %1
+#
+# AVX512BWVL: %2 = VPSUBQZ256rr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %ymm0, %ymm1
+
+ %0(<4 x s64>) = COPY %ymm0
+ %1(<4 x s64>) = COPY %ymm1
+ %2(<4 x s64>) = G_SUB %0, %1
+ %ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit %ymm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-sub-v512.mir b/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
new file mode 100644
index 0000000000000..dcd05f0569496
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
@@ -0,0 +1,130 @@
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=ALL
+
+--- |
+ define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) #0 {
+ %ret = sub <64 x i8> %arg1, %arg2
+ ret <64 x i8> %ret
+ }
+
+ define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) #0 {
+ %ret = sub <32 x i16> %arg1, %arg2
+ ret <32 x i16> %ret
+ }
+
+ define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) #1 {
+ %ret = sub <16 x i32> %arg1, %arg2
+ ret <16 x i32> %ret
+ }
+
+ define <8 x i64> @test_sub_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) #1 {
+ %ret = sub <8 x i64> %arg1, %arg2
+ ret <8 x i64> %ret
+ }
+
+ attributes #0 = { "target-features"="+avx512f,+avx512bw" }
+ attributes #1 = { "target-features"="+avx512f" }
+...
+---
+name: test_sub_v64i8
+# ALL-LABEL: name: test_sub_v64i8
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPSUBBZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<64 x s8>) = COPY %zmm0
+ %1(<64 x s8>) = COPY %zmm1
+ %2(<64 x s8>) = G_SUB %0, %1
+ %zmm0 = COPY %2(<64 x s8>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_sub_v32i16
+# ALL-LABEL: name: test_sub_v32i16
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPSUBWZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<32 x s16>) = COPY %zmm0
+ %1(<32 x s16>) = COPY %zmm1
+ %2(<32 x s16>) = G_SUB %0, %1
+ %zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_sub_v16i32
+# ALL-LABEL: name: test_sub_v16i32
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPSUBDZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<16 x s32>) = COPY %zmm0
+ %1(<16 x s32>) = COPY %zmm1
+ %2(<16 x s32>) = G_SUB %0, %1
+ %zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit %zmm0
+
+...
+---
+name: test_sub_v8i64
+# ALL-LABEL: name: test_sub_v8i64
+alignment: 4
+legalized: true
+regBankSelected: true
+# ALL: registers:
+# ALL-NEXT: - { id: 0, class: vr512 }
+# ALL-NEXT: - { id: 1, class: vr512 }
+# ALL-NEXT: - { id: 2, class: vr512 }
+registers:
+ - { id: 0, class: vecr }
+ - { id: 1, class: vecr }
+ - { id: 2, class: vecr }
+# ALL: %2 = VPSUBQZrr %0, %1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: %zmm0, %zmm1
+
+ %0(<8 x s64>) = COPY %zmm0
+ %1(<8 x s64>) = COPY %zmm1
+ %2(<8 x s64>) = G_SUB %0, %1
+ %zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit %zmm0
+
+...
diff --git a/test/CodeGen/X86/GlobalISel/select-trunc.mir b/test/CodeGen/X86/GlobalISel/select-trunc.mir
index 714340248ff6f..9b90543d65596 100644
--- a/test/CodeGen/X86/GlobalISel/select-trunc.mir
+++ b/test/CodeGen/X86/GlobalISel/select-trunc.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select %s -o - | FileCheck %s --check-prefix=CHECK
+# RUN: llc -mtriple=x86_64-linux-gnu -global-isel -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
--- |
define i1 @trunc_i32toi1(i32 %a) {
%r = trunc i32 %a to i1
@@ -33,19 +33,20 @@
...
---
name: trunc_i32toi1
+# CHECK-LABEL: name: trunc_i32toi1
alignment: 4
legalized: true
regBankSelected: true
-selected: false
-# CHECK-LABEL: name: trunc_i32toi1
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gr32 }
-# CHECK-NEXT: - { id: 1, class: gr8 }
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr8 }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# CHECK: body:
-# CHECK: %1 = COPY %0.sub_8
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %0.sub_8bit
+# CHECK-NEXT: %al = COPY %1
+# CHECK-NEXT: RET 0, implicit %al
body: |
bb.1 (%ir-block.0):
liveins: %edi
@@ -58,19 +59,20 @@ body: |
...
---
name: trunc_i32toi8
+# CHECK-LABEL: name: trunc_i32toi8
alignment: 4
legalized: true
regBankSelected: true
-selected: false
-# CHECK-LABEL: name: trunc_i32toi8
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gr32 }
-# CHECK-NEXT: - { id: 1, class: gr8 }
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr8 }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# CHECK: body:
-# CHECK: %1 = COPY %0.sub_8
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %0.sub_8bit
+# CHECK-NEXT: %al = COPY %1
+# CHECK-NEXT: RET 0, implicit %al
body: |
bb.1 (%ir-block.0):
liveins: %edi
@@ -83,19 +85,20 @@ body: |
...
---
name: trunc_i32toi16
+# CHECK-LABEL: name: trunc_i32toi16
alignment: 4
legalized: true
regBankSelected: true
-selected: false
-# CHECK-LABEL: name: trunc_i32toi16
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gr32 }
-# CHECK-NEXT: - { id: 1, class: gr16 }
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr32 }
+# CHECK-NEXT: - { id: 1, class: gr16 }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# CHECK: body:
-# CHECK: %1 = COPY %0.sub_16
+# CHECK: %0 = COPY %edi
+# CHECK-NEXT: %1 = COPY %0.sub_16bit
+# CHECK-NEXT: %ax = COPY %1
+# CHECK-NEXT: RET 0, implicit %ax
body: |
bb.1 (%ir-block.0):
liveins: %edi
@@ -108,19 +111,20 @@ body: |
...
---
name: trunc_i64toi8
+# CHECK-LABEL: name: trunc_i64toi8
alignment: 4
legalized: true
regBankSelected: true
-selected: false
-# CHECK-LABEL: name: trunc_i64toi8
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gr64 }
-# CHECK-NEXT: - { id: 1, class: gr8 }
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64_with_sub_8bit }
+# CHECK-NEXT: - { id: 1, class: gr8 }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# CHECK: body:
-# CHECK: %1 = COPY %0.sub_8
+# CHECK: %0 = COPY %rdi
+# CHECK-NEXT: %1 = COPY %0.sub_8bit
+# CHECK-NEXT: %al = COPY %1
+# CHECK-NEXT: RET 0, implicit %al
body: |
bb.1 (%ir-block.0):
liveins: %rdi
@@ -133,19 +137,20 @@ body: |
...
---
name: trunc_i64toi16
+# CHECK-LABEL: name: trunc_i64toi16
alignment: 4
legalized: true
regBankSelected: true
-selected: false
-# CHECK-LABEL: name: trunc_i64toi16
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gr64 }
-# CHECK-NEXT: - { id: 1, class: gr16 }
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64 }
+# CHECK-NEXT: - { id: 1, class: gr16 }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# CHECK: body:
-# CHECK: %1 = COPY %0.sub_16
+# CHECK: %0 = COPY %rdi
+# CHECK-NEXT: %1 = COPY %0.sub_16bit
+# CHECK-NEXT: %ax = COPY %1
+# CHECK-NEXT: RET 0, implicit %ax
body: |
bb.1 (%ir-block.0):
liveins: %rdi
@@ -158,19 +163,20 @@ body: |
...
---
name: trunc_i64toi32
+# CHECK-LABEL: name: trunc_i64toi32
alignment: 4
legalized: true
regBankSelected: true
-selected: false
-# CHECK-LABEL: name: trunc_i64toi32
-# CHECK: registers:
-# CHECK-NEXT: - { id: 0, class: gr64 }
-# CHECK-NEXT: - { id: 1, class: gr32 }
+# CHECK: registers:
+# CHECK-NEXT: - { id: 0, class: gr64 }
+# CHECK-NEXT: - { id: 1, class: gr32 }
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# CHECK: body:
-# CHECK: %1 = COPY %0.sub_32
+# CHECK: %0 = COPY %rdi
+# CHECK-NEXT: %1 = COPY %0.sub_32bit
+# CHECK-NEXT: %eax = COPY %1
+# CHECK-NEXT: RET 0, implicit %eax
body: |
bb.1 (%ir-block.0):
liveins: %rdi
diff --git a/test/CodeGen/X86/GlobalISel/sub-vec.ll b/test/CodeGen/X86/GlobalISel/sub-vec.ll
new file mode 100644
index 0000000000000..9caf18f0c0c7d
--- /dev/null
+++ b/test/CodeGen/X86/GlobalISel/sub-vec.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-linux-gnu -mcpu=skx -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=SKX
+
+define <16 x i8> @test_sub_v16i8(<16 x i8> %arg1, <16 x i8> %arg2) {
+; SKX-LABEL: test_sub_v16i8:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = sub <16 x i8> %arg1, %arg2
+ ret <16 x i8> %ret
+}
+
+define <8 x i16> @test_sub_v8i16(<8 x i16> %arg1, <8 x i16> %arg2) {
+; SKX-LABEL: test_sub_v8i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = sub <8 x i16> %arg1, %arg2
+ ret <8 x i16> %ret
+}
+
+define <4 x i32> @test_sub_v4i32(<4 x i32> %arg1, <4 x i32> %arg2) {
+; SKX-LABEL: test_sub_v4i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = sub <4 x i32> %arg1, %arg2
+ ret <4 x i32> %ret
+}
+
+define <2 x i64> @test_sub_v2i64(<2 x i64> %arg1, <2 x i64> %arg2) {
+; SKX-LABEL: test_sub_v2i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; SKX-NEXT: retq
+ %ret = sub <2 x i64> %arg1, %arg2
+ ret <2 x i64> %ret
+}
+
+define <32 x i8> @test_sub_v32i8(<32 x i8> %arg1, <32 x i8> %arg2) {
+; SKX-LABEL: test_sub_v32i8:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = sub <32 x i8> %arg1, %arg2
+ ret <32 x i8> %ret
+}
+
+define <16 x i16> @test_sub_v16i16(<16 x i16> %arg1, <16 x i16> %arg2) {
+; SKX-LABEL: test_sub_v16i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubw %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = sub <16 x i16> %arg1, %arg2
+ ret <16 x i16> %ret
+}
+
+define <8 x i32> @test_sub_v8i32(<8 x i32> %arg1, <8 x i32> %arg2) {
+; SKX-LABEL: test_sub_v8i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubd %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = sub <8 x i32> %arg1, %arg2
+ ret <8 x i32> %ret
+}
+
+define <4 x i64> @test_sub_v4i64(<4 x i64> %arg1, <4 x i64> %arg2) {
+; SKX-LABEL: test_sub_v4i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; SKX-NEXT: retq
+ %ret = sub <4 x i64> %arg1, %arg2
+ ret <4 x i64> %ret
+}
+
+define <64 x i8> @test_sub_v64i8(<64 x i8> %arg1, <64 x i8> %arg2) {
+; SKX-LABEL: test_sub_v64i8:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubb %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = sub <64 x i8> %arg1, %arg2
+ ret <64 x i8> %ret
+}
+
+define <32 x i16> @test_sub_v32i16(<32 x i16> %arg1, <32 x i16> %arg2) {
+; SKX-LABEL: test_sub_v32i16:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubw %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = sub <32 x i16> %arg1, %arg2
+ ret <32 x i16> %ret
+}
+
+define <16 x i32> @test_sub_v16i32(<16 x i32> %arg1, <16 x i32> %arg2) {
+; SKX-LABEL: test_sub_v16i32:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubd %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = sub <16 x i32> %arg1, %arg2
+ ret <16 x i32> %ret
+}
+
+define <8 x i64> @test_sub_v8i64(<8 x i64> %arg1, <8 x i64> %arg2) {
+; SKX-LABEL: test_sub_v8i64:
+; SKX: # BB#0:
+; SKX-NEXT: vpsubq %zmm1, %zmm0, %zmm0
+; SKX-NEXT: retq
+ %ret = sub <8 x i64> %arg1, %arg2
+ ret <8 x i64> %ret
+}
+
diff --git a/test/CodeGen/X86/GlobalISel/trunc.ll b/test/CodeGen/X86/GlobalISel/trunc.ll
index a56fc3b5a87f4..6c0f01673afc0 100644
--- a/test/CodeGen/X86/GlobalISel/trunc.ll
+++ b/test/CodeGen/X86/GlobalISel/trunc.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel < %s -o - | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=CHECK
define i1 @trunc_i32toi1(i32 %a) {
; CHECK-LABEL: trunc_i32toi1:
diff --git a/test/CodeGen/X86/O0-pipeline.ll b/test/CodeGen/X86/O0-pipeline.ll
index 262cb96ca6d87..874e3e379d8e8 100644
--- a/test/CodeGen/X86/O0-pipeline.ll
+++ b/test/CodeGen/X86/O0-pipeline.ll
@@ -4,8 +4,8 @@
; CHECK-LABEL: Pass Arguments:
; CHECK-NEXT: Target Library Information
-; CHECK-NEXT: Target Transform Information
; CHECK-NEXT: Target Pass Configuration
+; CHECK-NEXT: Target Transform Information
; CHECK-NEXT: Type-Based Alias Analysis
; CHECK-NEXT: Scoped NoAlias Alias Analysis
; CHECK-NEXT: Assumption Cache Tracker
diff --git a/test/CodeGen/X86/addcarry.ll b/test/CodeGen/X86/addcarry.ll
index be550e3fe2d16..3f4ee362e230f 100644
--- a/test/CodeGen/X86/addcarry.ll
+++ b/test/CodeGen/X86/addcarry.ll
@@ -86,12 +86,12 @@ entry:
define %scalar @pr31719(%scalar* nocapture readonly %this, %scalar %arg.b) {
; CHECK-LABEL: pr31719:
; CHECK: # BB#0: # %entry
+; CHECK-NEXT: xorl %r10d, %r10d
; CHECK-NEXT: addq 8(%rsi), %rcx
-; CHECK-NEXT: sbbq %r10, %r10
-; CHECK-NEXT: andl $1, %r10d
+; CHECK-NEXT: setb %r10b
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: addq 16(%rsi), %r8
-; CHECK-NEXT: sbbq %rax, %rax
-; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: setb %al
; CHECK-NEXT: addq 24(%rsi), %r9
; CHECK-NEXT: addq (%rsi), %rdx
; CHECK-NEXT: adcq $0, %rcx
@@ -190,9 +190,9 @@ entry:
define i64 @shiftadd(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: shiftadd:
; CHECK: # BB#0: # %entry
+; CHECK-NEXT: leaq (%rdx,%rcx), %rax
; CHECK-NEXT: addq %rsi, %rdi
-; CHECK-NEXT: adcq %rcx, %rdx
-; CHECK-NEXT: movq %rdx, %rax
+; CHECK-NEXT: adcq $0, %rax
; CHECK-NEXT: retq
entry:
%0 = zext i64 %a to i128
@@ -213,12 +213,12 @@ define %S @readd(%S* nocapture readonly %this, %S %arg.b) {
; CHECK-NEXT: addq (%rsi), %rdx
; CHECK-NEXT: movq 8(%rsi), %r10
; CHECK-NEXT: adcq $0, %r10
-; CHECK-NEXT: sbbq %rax, %rax
-; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: addq %rcx, %r10
; CHECK-NEXT: adcq 16(%rsi), %rax
-; CHECK-NEXT: sbbq %rcx, %rcx
-; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: setb %cl
+; CHECK-NEXT: movzbl %cl, %ecx
; CHECK-NEXT: addq %r8, %rax
; CHECK-NEXT: adcq 24(%rsi), %rcx
; CHECK-NEXT: addq %r9, %rcx
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index aa28ef5175ed6..2aaf14001758f 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -135,87 +135,88 @@ define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) {
define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-LABEL: avg_v32i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: movdqa 16(%rdi), %xmm8
+; SSE2-NEXT: movdqa (%rdi), %xmm8
+; SSE2-NEXT: movdqa 16(%rdi), %xmm11
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm8, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm8, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm6, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm5, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm10, %xmm7
+; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm8, %xmm1
+; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: paddd %xmm9, %xmm13
+; SSE2-NEXT: paddd %xmm15, %xmm2
+; SSE2-NEXT: paddd %xmm14, %xmm5
+; SSE2-NEXT: paddd %xmm8, %xmm0
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm3
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm4, %xmm9
-; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm5
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm7
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm5
+; SSE2-NEXT: paddd %xmm4, %xmm2
+; SSE2-NEXT: paddd %xmm4, %xmm13
; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm7, %xmm3
; SSE2-NEXT: psrld $1, %xmm0
-; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm6, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm9
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm4, %xmm9
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: packuswb %xmm9, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm5, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: packuswb %xmm6, %xmm3
-; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: pand %xmm4, %xmm13
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm7, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm13, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
@@ -258,183 +259,198 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) {
define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; SSE2-LABEL: avg_v64i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm6
-; SSE2-NEXT: movdqa 16(%rdi), %xmm2
-; SSE2-NEXT: movdqa 32(%rdi), %xmm1
-; SSE2-NEXT: movdqa 48(%rdi), %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa (%rsi), %xmm5
-; SSE2-NEXT: movdqa 16(%rsi), %xmm13
-; SSE2-NEXT: movdqa 32(%rsi), %xmm11
+; SSE2-NEXT: subq $152, %rsp
+; SSE2-NEXT: .Lcfi0:
+; SSE2-NEXT: .cfi_def_cfa_offset 160
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: movdqa 16(%rdi), %xmm4
+; SSE2-NEXT: movdqa 32(%rdi), %xmm5
+; SSE2-NEXT: movdqa 48(%rdi), %xmm6
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm6, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm4, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm15
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm0[8],xmm15[9],xmm0[9],xmm15[10],xmm0[10],xmm15[11],xmm0[11],xmm15[12],xmm0[12],xmm15[13],xmm0[13],xmm15[14],xmm0[14],xmm15[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm15, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm5, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm0[8],xmm10[9],xmm0[9],xmm10[10],xmm0[10],xmm10[11],xmm0[11],xmm10[12],xmm0[12],xmm10[13],xmm0[13],xmm10[14],xmm0[14],xmm10[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm10, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm7, %xmm3
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm4, %xmm10
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm5, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm12, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3],xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm6, %xmm5
; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm13, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm4, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm14, %xmm12
-; SSE2-NEXT: movdqa %xmm7, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm15, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3],xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm13, %xmm15
+; SSE2-NEXT: movdqa %xmm6, %xmm8
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm0[8],xmm8[9],xmm0[9],xmm8[10],xmm0[10],xmm8[11],xmm0[11],xmm8[12],xmm0[12],xmm8[13],xmm0[13],xmm8[14],xmm0[14],xmm8[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm8, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3],xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm6, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsi), %xmm14
+; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm0[8],xmm7[9],xmm0[9],xmm7[10],xmm0[10],xmm7[11],xmm0[11],xmm7[12],xmm0[12],xmm7[13],xmm0[13],xmm7[14],xmm0[14],xmm7[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm15
; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm8, %xmm15
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1],xmm13[2],xmm0[2],xmm13[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm2, %xmm13
-; SSE2-NEXT: movdqa %xmm11, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm6, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3],xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm14, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm0[4],xmm9[5],xmm0[5],xmm9[6],xmm0[6],xmm9[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm5, %xmm9
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
+; SSE2-NEXT: movdqa 16(%rsi), %xmm12
+; SSE2-NEXT: movdqa %xmm12, %xmm6
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm7, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3],xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm11, %xmm14
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm0[4],xmm14[5],xmm0[5],xmm14[6],xmm0[6],xmm14[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm2, %xmm14
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm5, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm1, %xmm11
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: movdqa 48(%rsi), %xmm7
-; SSE2-NEXT: movdqa %xmm7, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm1, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3],xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm12, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1],xmm12[2],xmm0[2],xmm12[3],xmm0[3]
+; SSE2-NEXT: movdqa 32(%rsi), %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm0[4],xmm11[5],xmm0[5],xmm11[6],xmm0[6],xmm11[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3],xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: movdqa %xmm7, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm1, %xmm5
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm2, %xmm7
+; SSE2-NEXT: movdqa 48(%rsi), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm8, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Folded Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload
+; SSE2-NEXT: paddd (%rsp), %xmm11 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm12 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm10 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm6 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm14 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
+; SSE2-NEXT: paddd {{[0-9]+}}(%rsp), %xmm15 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: paddd %xmm0, %xmm10
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm0, %xmm12
-; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: paddd %xmm0, %xmm7
; SSE2-NEXT: paddd %xmm0, %xmm9
-; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm14
+; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: paddd %xmm0, %xmm6
+; SSE2-NEXT: paddd %xmm0, %xmm10
+; SSE2-NEXT: paddd %xmm0, %xmm12
; SSE2-NEXT: paddd %xmm0, %xmm11
+; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: paddd %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm8
+; SSE2-NEXT: paddd %xmm0, %xmm4
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: psrld $1, %xmm10
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm10
-; SSE2-NEXT: packuswb %xmm1, %xmm10
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: packuswb %xmm1, %xmm2
-; SSE2-NEXT: packuswb %xmm10, %xmm2
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm12
-; SSE2-NEXT: pand %xmm0, %xmm12
-; SSE2-NEXT: pand %xmm0, %xmm4
-; SSE2-NEXT: packuswb %xmm12, %xmm4
-; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm15
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE2-NEXT: pand %xmm0, %xmm15
-; SSE2-NEXT: pand %xmm0, %xmm13
-; SSE2-NEXT: packuswb %xmm15, %xmm13
-; SSE2-NEXT: packuswb %xmm4, %xmm13
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm0, %xmm7
+; SSE2-NEXT: packuswb %xmm15, %xmm7
+; SSE2-NEXT: psrld $1, %xmm14
; SSE2-NEXT: psrld $1, %xmm9
; SSE2-NEXT: pand %xmm0, %xmm9
+; SSE2-NEXT: pand %xmm0, %xmm14
+; SSE2-NEXT: packuswb %xmm9, %xmm14
+; SSE2-NEXT: packuswb %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: pand %xmm0, %xmm13
; SSE2-NEXT: pand %xmm0, %xmm6
-; SSE2-NEXT: packuswb %xmm9, %xmm6
+; SSE2-NEXT: packuswb %xmm13, %xmm6
+; SSE2-NEXT: psrld $1, %xmm12
+; SSE2-NEXT: psrld $1, %xmm10
+; SSE2-NEXT: pand %xmm0, %xmm10
+; SSE2-NEXT: pand %xmm0, %xmm12
+; SSE2-NEXT: packuswb %xmm10, %xmm12
+; SSE2-NEXT: packuswb %xmm6, %xmm12
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: psrld $1, %xmm11
-; SSE2-NEXT: psrld $1, %xmm14
-; SSE2-NEXT: pand %xmm0, %xmm14
; SSE2-NEXT: pand %xmm0, %xmm11
-; SSE2-NEXT: packuswb %xmm14, %xmm11
-; SSE2-NEXT: packuswb %xmm6, %xmm11
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm8
-; SSE2-NEXT: pand %xmm0, %xmm8
-; SSE2-NEXT: pand %xmm0, %xmm3
-; SSE2-NEXT: packuswb %xmm8, %xmm3
-; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: packuswb %xmm11, %xmm5
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm0, %xmm6
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm6, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm4
+; SSE2-NEXT: movdqa %xmm8, %xmm5
; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
-; SSE2-NEXT: pand %xmm0, %xmm7
-; SSE2-NEXT: packuswb %xmm5, %xmm7
-; SSE2-NEXT: packuswb %xmm3, %xmm7
-; SSE2-NEXT: movdqu %xmm7, (%rax)
-; SSE2-NEXT: movdqu %xmm11, (%rax)
-; SSE2-NEXT: movdqu %xmm13, (%rax)
+; SSE2-NEXT: pand %xmm0, %xmm4
+; SSE2-NEXT: packuswb %xmm5, %xmm4
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm5
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: packuswb %xmm5, %xmm1
+; SSE2-NEXT: packuswb %xmm4, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
+; SSE2-NEXT: movdqu %xmm2, (%rax)
+; SSE2-NEXT: movdqu %xmm12, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
+; SSE2-NEXT: addq $152, %rsp
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v64i8:
@@ -448,21 +464,21 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpaddd %ymm15, %ymm7, %ymm7
+; AVX2-NEXT: vpaddd %ymm14, %ymm6, %ymm6
+; AVX2-NEXT: vpaddd %ymm13, %ymm5, %ymm5
+; AVX2-NEXT: vpaddd %ymm12, %ymm4, %ymm4
+; AVX2-NEXT: vpaddd %ymm11, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm10, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm9, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm3, %ymm3
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm5
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm6
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm7
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8
; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm9
; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm10
@@ -524,13 +540,13 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) {
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpaddd %zmm7, %zmm3, %zmm3
+; AVX512F-NEXT: vpaddd %zmm6, %zmm2, %zmm2
+; AVX512F-NEXT: vpaddd %zmm5, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4
; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1
@@ -657,27 +673,27 @@ define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) {
define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm2
-; SSE2-NEXT: movdqa 16(%rdi), %xmm4
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm5
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: movdqa %xmm4, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-NEXT: paddd %xmm5, %xmm1
; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm8, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm0
@@ -739,79 +755,80 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) {
define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm4
-; SSE2-NEXT: movdqa 16(%rdi), %xmm11
-; SSE2-NEXT: movdqa 32(%rdi), %xmm10
+; SSE2-NEXT: movdqa (%rdi), %xmm10
+; SSE2-NEXT: movdqa 16(%rdi), %xmm9
+; SSE2-NEXT: movdqa 32(%rdi), %xmm11
; SSE2-NEXT: movdqa 48(%rdi), %xmm8
-; SSE2-NEXT: movdqa (%rsi), %xmm9
+; SSE2-NEXT: movdqa (%rsi), %xmm14
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm10, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm10, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm9, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: movdqa %xmm14, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm6, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm4, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: paddd %xmm11, %xmm2
+; SSE2-NEXT: paddd %xmm15, %xmm5
+; SSE2-NEXT: paddd %xmm9, %xmm1
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm14
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm5
; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: psrld $1, %xmm14
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm9
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: packssdw %xmm7, %xmm9
+; SSE2-NEXT: pslld $16, %xmm14
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: packssdw %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -820,7 +837,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm9, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v32i16:
@@ -830,13 +847,13 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
@@ -867,9 +884,9 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
@@ -1030,87 +1047,88 @@ define void @avg_v16i8_2(<16 x i8>* %a, <16 x i8>* %b) {
define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) {
; SSE2-LABEL: avg_v32i8_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm3
-; SSE2-NEXT: movdqa 16(%rdi), %xmm8
+; SSE2-NEXT: movdqa (%rdi), %xmm8
+; SSE2-NEXT: movdqa 16(%rdi), %xmm11
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm5, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm8, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
; SSE2-NEXT: movdqa %xmm8, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm4[8],xmm10[9],xmm4[9],xmm10[10],xmm4[10],xmm10[11],xmm4[11],xmm10[12],xmm4[12],xmm10[13],xmm4[13],xmm10[14],xmm4[14],xmm10[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm4[0],xmm10[1],xmm4[1],xmm10[2],xmm4[2],xmm10[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3],xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm8, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1],xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm4[8],xmm15[9],xmm4[9],xmm15[10],xmm4[10],xmm15[11],xmm4[11],xmm15[12],xmm4[12],xmm15[13],xmm4[13],xmm15[14],xmm4[14],xmm15[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm15, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1],xmm15[2],xmm4[2],xmm15[3],xmm4[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3],xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm6, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm5, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm3, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1],xmm11[2],xmm4[2],xmm11[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm2, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
-; SSE2-NEXT: movdqa %xmm1, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
-; SSE2-NEXT: paddd %xmm10, %xmm7
+; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm8, %xmm1
+; SSE2-NEXT: paddd %xmm11, %xmm1
+; SSE2-NEXT: paddd %xmm9, %xmm13
+; SSE2-NEXT: paddd %xmm15, %xmm2
+; SSE2-NEXT: paddd %xmm14, %xmm5
+; SSE2-NEXT: paddd %xmm8, %xmm0
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm3
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
-; SSE2-NEXT: paddd %xmm4, %xmm9
-; SSE2-NEXT: paddd %xmm4, %xmm2
-; SSE2-NEXT: paddd %xmm4, %xmm5
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm7
+; SSE2-NEXT: paddd %xmm4, %xmm3
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm4, %xmm5
+; SSE2-NEXT: paddd %xmm4, %xmm2
+; SSE2-NEXT: paddd %xmm4, %xmm13
; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: psrld $1, %xmm7
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm3
+; SSE2-NEXT: packuswb %xmm7, %xmm3
; SSE2-NEXT: psrld $1, %xmm0
-; SSE2-NEXT: psrld $1, %xmm5
+; SSE2-NEXT: psrld $1, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm6
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: packuswb %xmm6, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm9
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
-; SSE2-NEXT: pand %xmm4, %xmm9
-; SSE2-NEXT: pand %xmm4, %xmm2
-; SSE2-NEXT: packuswb %xmm9, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pand %xmm4, %xmm5
-; SSE2-NEXT: pand %xmm4, %xmm0
-; SSE2-NEXT: packuswb %xmm5, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm4, %xmm6
-; SSE2-NEXT: pand %xmm4, %xmm3
-; SSE2-NEXT: packuswb %xmm6, %xmm3
-; SSE2-NEXT: pand %xmm4, %xmm7
+; SSE2-NEXT: pand %xmm4, %xmm2
+; SSE2-NEXT: packuswb %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm13
+; SSE2-NEXT: pand %xmm4, %xmm13
; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: packuswb %xmm7, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
+; SSE2-NEXT: packuswb %xmm13, %xmm1
+; SSE2-NEXT: packuswb %xmm2, %xmm1
; SSE2-NEXT: movdqu %xmm1, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
@@ -1494,27 +1512,27 @@ define void @avg_v8i16_2(<8 x i16>* %a, <8 x i16>* %b) {
define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
; SSE2-LABEL: avg_v16i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm2
-; SSE2-NEXT: movdqa 16(%rdi), %xmm4
+; SSE2-NEXT: movdqa (%rdi), %xmm4
+; SSE2-NEXT: movdqa 16(%rdi), %xmm5
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: movdqa %xmm2, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; SSE2-NEXT: movdqa %xmm4, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: movdqa %xmm4, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
+; SSE2-NEXT: movdqa %xmm5, %xmm7
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm6[4],xmm2[5],xmm6[5],xmm2[6],xmm6[6],xmm2[7],xmm6[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; SSE2-NEXT: paddd %xmm5, %xmm1
; SSE2-NEXT: paddd %xmm7, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE2-NEXT: paddd %xmm4, %xmm1
+; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm8, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: paddd %xmm4, %xmm0
@@ -1576,79 +1594,80 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) {
define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-LABEL: avg_v32i16_2:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa (%rdi), %xmm4
-; SSE2-NEXT: movdqa 16(%rdi), %xmm11
-; SSE2-NEXT: movdqa 32(%rdi), %xmm10
+; SSE2-NEXT: movdqa (%rdi), %xmm10
+; SSE2-NEXT: movdqa 16(%rdi), %xmm9
+; SSE2-NEXT: movdqa 32(%rdi), %xmm11
; SSE2-NEXT: movdqa 48(%rdi), %xmm8
-; SSE2-NEXT: movdqa (%rsi), %xmm9
+; SSE2-NEXT: movdqa (%rsi), %xmm14
; SSE2-NEXT: movdqa 16(%rsi), %xmm1
; SSE2-NEXT: movdqa 32(%rsi), %xmm2
; SSE2-NEXT: movdqa 48(%rsi), %xmm3
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm11, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm10, %xmm12
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm10, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm9, %xmm12
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
+; SSE2-NEXT: movdqa %xmm11, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm0[0],xmm11[1],xmm0[1],xmm11[2],xmm0[2],xmm11[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm8, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm0[4],xmm13[5],xmm0[5],xmm13[6],xmm0[6],xmm13[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1],xmm8[2],xmm0[2],xmm8[3],xmm0[3]
-; SSE2-NEXT: movdqa %xmm9, %xmm7
+; SSE2-NEXT: movdqa %xmm14, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm6, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm4, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1],xmm14[2],xmm0[2],xmm14[3],xmm0[3]
; SSE2-NEXT: movdqa %xmm1, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm5, %xmm6
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm11, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm12, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE2-NEXT: paddd %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
-; SSE2-NEXT: paddd %xmm13, %xmm4
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: paddd %xmm13, %xmm4
+; SSE2-NEXT: paddd %xmm11, %xmm2
+; SSE2-NEXT: paddd %xmm15, %xmm5
+; SSE2-NEXT: paddd %xmm9, %xmm1
+; SSE2-NEXT: paddd %xmm12, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm14
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Folded Reload
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: paddd %xmm0, %xmm9
+; SSE2-NEXT: paddd %xmm0, %xmm14
; SSE2-NEXT: paddd %xmm0, %xmm6
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: paddd %xmm0, %xmm5
; SSE2-NEXT: paddd %xmm0, %xmm2
; SSE2-NEXT: paddd %xmm0, %xmm4
; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: psrld $1, %xmm3
-; SSE2-NEXT: psrld $1, %xmm4
-; SSE2-NEXT: psrld $1, %xmm2
-; SSE2-NEXT: psrld $1, %xmm5
-; SSE2-NEXT: psrld $1, %xmm1
-; SSE2-NEXT: psrld $1, %xmm6
-; SSE2-NEXT: psrld $1, %xmm9
+; SSE2-NEXT: psrld $1, %xmm14
; SSE2-NEXT: psrld $1, %xmm7
; SSE2-NEXT: pslld $16, %xmm7
; SSE2-NEXT: psrad $16, %xmm7
-; SSE2-NEXT: pslld $16, %xmm9
-; SSE2-NEXT: psrad $16, %xmm9
-; SSE2-NEXT: packssdw %xmm7, %xmm9
+; SSE2-NEXT: pslld $16, %xmm14
+; SSE2-NEXT: psrad $16, %xmm14
+; SSE2-NEXT: packssdw %xmm7, %xmm14
+; SSE2-NEXT: psrld $1, %xmm1
+; SSE2-NEXT: psrld $1, %xmm6
; SSE2-NEXT: pslld $16, %xmm6
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: packssdw %xmm6, %xmm1
+; SSE2-NEXT: psrld $1, %xmm2
+; SSE2-NEXT: psrld $1, %xmm5
; SSE2-NEXT: pslld $16, %xmm5
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm5, %xmm2
+; SSE2-NEXT: psrld $1, %xmm3
+; SSE2-NEXT: psrld $1, %xmm4
; SSE2-NEXT: pslld $16, %xmm4
; SSE2-NEXT: psrad $16, %xmm4
; SSE2-NEXT: pslld $16, %xmm3
@@ -1657,7 +1676,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; SSE2-NEXT: movdqu %xmm3, (%rax)
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm1, (%rax)
-; SSE2-NEXT: movdqu %xmm9, (%rax)
+; SSE2-NEXT: movdqu %xmm14, (%rax)
; SSE2-NEXT: retq
;
; AVX2-LABEL: avg_v32i16_2:
@@ -1667,13 +1686,13 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm6 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm7 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4
; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1
@@ -1704,9 +1723,9 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) {
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 3cadbe2a8db31..ff5a2371a1452 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -2244,11 +2244,11 @@ define <4 x double> @test_mm256_set_pd(double %a0, double %a1, double %a2, doubl
; X32: # BB#0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; X32-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_pd:
@@ -2269,19 +2269,19 @@ define <8 x float> @test_mm256_set_ps(float %a0, float %a1, float %a2, float %a3
; X32: # BB#0:
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
-; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
+; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0],xmm4[3]
+; X32-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1,2],xmm7[0]
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
+; X32-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_ps:
@@ -2881,10 +2881,10 @@ define <4 x double> @test_mm256_setr_pd(double %a0, double %a1, double %a2, doub
; X32: # BB#0:
; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X32-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
-; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; X32-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; X32-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm3[0],xmm2[0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
@@ -2908,16 +2908,16 @@ define <8 x float> @test_mm256_setr_ps(float %a0, float %a1, float %a2, float %a
; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm5 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm6 = mem[0],zero,zero,zero
+; X32-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
; X32-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; X32-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; X32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X32-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
-; X32-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
-; X32-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0],xmm3[3]
-; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0]
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[0],xmm6[0],xmm7[2,3]
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm5[0],xmm1[3]
+; X32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
diff --git a/test/CodeGen/X86/avx.ll b/test/CodeGen/X86/avx.ll
index 647b7a8f4dfca..341dd867e4ff4 100644
--- a/test/CodeGen/X86/avx.ll
+++ b/test/CodeGen/X86/avx.ll
@@ -113,11 +113,11 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK: insertps $48
-; CHECK: vaddps
; CHECK: insertps $48
; CHECK: insertps $48
; CHECK: vaddps
; CHECK: vaddps
+; CHECK: vaddps
; CHECK-NEXT: ret
%1 = getelementptr inbounds float, float* %fb, i64 %index
%2 = load float, float* %1, align 4
diff --git a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index e29cf09718ad9..63b0281a73399 100644
--- a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -13,10 +13,10 @@ define zeroext i16 @cmp_kor_seq_16(<16 x float> %a, <16 x float> %b, <16 x float
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vcmpgeps %zmm4, %zmm0, %k0
; CHECK-NEXT: vcmpgeps %zmm4, %zmm1, %k1
+; CHECK-NEXT: vcmpgeps %zmm4, %zmm2, %k2
+; CHECK-NEXT: vcmpgeps %zmm4, %zmm3, %k3
; CHECK-NEXT: korw %k1, %k0, %k0
-; CHECK-NEXT: vcmpgeps %zmm4, %zmm2, %k1
-; CHECK-NEXT: vcmpgeps %zmm4, %zmm3, %k2
-; CHECK-NEXT: korw %k2, %k1, %k1
+; CHECK-NEXT: korw %k3, %k2, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
diff --git a/test/CodeGen/X86/avx512-cmp.ll b/test/CodeGen/X86/avx512-cmp.ll
index c1b64743f8985..eae7b94f5135c 100644
--- a/test/CodeGen/X86/avx512-cmp.ll
+++ b/test/CodeGen/X86/avx512-cmp.ll
@@ -47,16 +47,20 @@ l2:
ret float %c1
}
-; FIXME: Can use vcmpeqss and extract from the mask here in AVX512.
define i32 @test3(float %a, float %b) {
-; ALL-LABEL: test3:
-; ALL: ## BB#0:
-; ALL-NEXT: vucomiss %xmm1, %xmm0
-; ALL-NEXT: setnp %al
-; ALL-NEXT: sete %cl
-; ALL-NEXT: andb %al, %cl
-; ALL-NEXT: movzbl %cl, %eax
-; ALL-NEXT: retq
+; KNL-LABEL: test3:
+; KNL: ## BB#0:
+; KNL-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test3:
+; SKX: ## BB#0:
+; SKX-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; SKX-NEXT: kmovd %k0, %eax
+; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: retq
%cmp10.i = fcmp oeq float %a, %b
%conv11.i = zext i1 %cmp10.i to i32
@@ -69,7 +73,7 @@ define float @test5(float %p) #0 {
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vucomiss %xmm1, %xmm0
; ALL-NEXT: jne LBB3_1
-; ALL-NEXT: jp LBB3_1
+; ALL-NEXT: jp LBB3_1
; ALL-NEXT: ## BB#2: ## %return
; ALL-NEXT: retq
; ALL-NEXT: LBB3_1: ## %if.end
@@ -158,47 +162,22 @@ B:
}
define i32 @test10(i64 %b, i64 %c, i1 %d) {
-; KNL-LABEL: test10:
-; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edx
-; KNL-NEXT: kmovw %edx, %k0
-; KNL-NEXT: cmpq %rsi, %rdi
-; KNL-NEXT: sete %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: korw %k1, %k0, %k1
-; KNL-NEXT: kxorw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
-; KNL-NEXT: je LBB8_1
-; KNL-NEXT: ## BB#2: ## %if.end.i
-; KNL-NEXT: movl $6, %eax
-; KNL-NEXT: retq
-; KNL-NEXT: LBB8_1: ## %if.then.i
-; KNL-NEXT: movl $5, %eax
-; KNL-NEXT: retq
-;
-; SKX-LABEL: test10:
-; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edx
-; SKX-NEXT: kmovd %edx, %k0
-; SKX-NEXT: cmpq %rsi, %rdi
-; SKX-NEXT: sete %al
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: korw %k1, %k0, %k1
-; SKX-NEXT: kxorw %k1, %k0, %k0
-; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
-; SKX-NEXT: je LBB8_1
-; SKX-NEXT: ## BB#2: ## %if.end.i
-; SKX-NEXT: movl $6, %eax
-; SKX-NEXT: retq
-; SKX-NEXT: LBB8_1: ## %if.then.i
-; SKX-NEXT: movl $5, %eax
-; SKX-NEXT: retq
+; ALL-LABEL: test10:
+; ALL: ## BB#0:
+; ALL-NEXT: movl %edx, %eax
+; ALL-NEXT: andb $1, %al
+; ALL-NEXT: cmpq %rsi, %rdi
+; ALL-NEXT: sete %cl
+; ALL-NEXT: orb %dl, %cl
+; ALL-NEXT: andb $1, %cl
+; ALL-NEXT: cmpb %cl, %al
+; ALL-NEXT: je LBB8_1
+; ALL-NEXT: ## BB#2: ## %if.end.i
+; ALL-NEXT: movl $6, %eax
+; ALL-NEXT: retq
+; ALL-NEXT: LBB8_1: ## %if.then.i
+; ALL-NEXT: movl $5, %eax
+; ALL-NEXT: retq
%cmp8.i = icmp eq i64 %b, %c
%or1 = or i1 %d, %cmp8.i
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 2b55372f30667..33ac15de9de9a 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -1552,10 +1552,10 @@ define <2 x float> @uitofp_2i1_float(<2 x i32> %a) {
; NOVL-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
; NOVL-NEXT: vpxor %xmm1, %xmm0, %xmm0
; NOVL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; NOVL-NEXT: vpextrq $1, %xmm0, %rax
+; NOVL-NEXT: vpextrb $8, %xmm0, %eax
; NOVL-NEXT: andl $1, %eax
; NOVL-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm1
-; NOVL-NEXT: vmovq %xmm0, %rax
+; NOVL-NEXT: vpextrb $0, %xmm0, %eax
; NOVL-NEXT: andl $1, %eax
; NOVL-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
; NOVL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll
index b31b00e54e83a..2145f5fb09a81 100644
--- a/test/CodeGen/X86/avx512-ext.ll
+++ b/test/CodeGen/X86/avx512-ext.ll
@@ -1434,26 +1434,26 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32> %a1, <8 x i32> %a2) nounwind {
define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-LABEL: trunc_i32_to_i1:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edi
-; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: movw $-4, %ax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: kmovw %eax, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
+; KNL-NEXT: andl $1, %edi
+; KNL-NEXT: kmovw %edi, %k1
+; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: movw $-4, %ax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
-; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovd %eax, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: kshiftlw $1, %k0, %k0
+; SKX-NEXT: andl $1, %edi
+; SKX-NEXT: kmovw %edi, %k1
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-fsel.ll b/test/CodeGen/X86/avx512-fsel.ll
index a9b8914ee1fe6..7777ba7954169 100644
--- a/test/CodeGen/X86/avx512-fsel.ll
+++ b/test/CodeGen/X86/avx512-fsel.ll
@@ -10,25 +10,11 @@ define i32 @test(float %a, float %b) {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: Lcfi0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: vucomiss %xmm1, %xmm0
-; CHECK-NEXT: setp %al
-; CHECK-NEXT: setne %cl
-; CHECK-NEXT: setnp %dl
-; CHECK-NEXT: sete %sil
-; CHECK-NEXT: andb %dl, %sil
-; CHECK-NEXT: ## implicit-def: %EDI
-; CHECK-NEXT: movb %sil, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k0
-; CHECK-NEXT: orb %al, %cl
-; CHECK-NEXT: ## implicit-def: %EDI
-; CHECK-NEXT: movb %cl, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kmovw %k1, %edi
-; CHECK-NEXT: movb %dil, %al
-; CHECK-NEXT: testb $1, %al
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vcmpeqss %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: xorb $-1, %cl
+; CHECK-NEXT: testb $1, %cl
; CHECK-NEXT: jne LBB0_1
; CHECK-NEXT: jmp LBB0_2
; CHECK-NEXT: LBB0_1: ## %L_0
diff --git a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
index c03623a2f0359..4890afec2164b 100644
--- a/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
+++ b/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
@@ -852,16 +852,16 @@ define <16 x float> @gather_mask_test(<16 x i32> %ind, <16 x float> %src, i8* %b
; CHECK-NEXT: kxorw %k0, %k0, %k1
; CHECK-NEXT: vmovaps %zmm1, %zmm3
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
-; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm2
; CHECK-NEXT: movw $1, %ax
; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: vmovaps %zmm1, %zmm3
-; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm3 {%k1}
+; CHECK-NEXT: vmovaps %zmm1, %zmm4
+; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm4 {%k1}
; CHECK-NEXT: movw $220, %ax
; CHECK-NEXT: kmovd %eax, %k1
; CHECK-NEXT: vgatherdps (%rdi,%zmm0,4), %zmm1 {%k1}
-; CHECK-NEXT: vaddps %zmm3, %zmm1, %zmm0
-; CHECK-NEXT: vaddps %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm0
+; CHECK-NEXT: vaddps %zmm4, %zmm1, %zmm1
+; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 -1, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.gather.dps.512 (<16 x float> %src, i8* %base, <16 x i32>%ind, i16 0, i32 4)
diff --git a/test/CodeGen/X86/avx512-i1test.ll b/test/CodeGen/X86/avx512-i1test.ll
index 69fafdfff9aac..321f26674e1e3 100755
--- a/test/CodeGen/X86/avx512-i1test.ll
+++ b/test/CodeGen/X86/avx512-i1test.ll
@@ -66,14 +66,13 @@ L_30: ; preds = %bb51, %L_10
define i64 @func2(i1 zeroext %i, i32 %j) {
; CHECK-LABEL: func2:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB1_1
; CHECK-NEXT: # BB#2: # %if.then
; CHECK-NEXT: jmp bar # TAILCALL
; CHECK-NEXT: .LBB1_1: # %return
-; CHECK-NEXT: orq $-2, %rdi
-; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orq $-2, %rax
; CHECK-NEXT: retq
entry:
%tobool = icmp eq i32 %j, 0
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 87928348a851a..29a5325a0ae98 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -260,8 +260,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: je LBB10_2
; KNL-NEXT: ## BB#1: ## %A
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -276,8 +275,7 @@ define <16 x i32> @test11(<16 x i32>%a, <16 x i32>%b) {
; SKX-NEXT: kshiftlw $11, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: je LBB10_2
; SKX-NEXT: ## BB#1: ## %A
; SKX-NEXT: vmovdqa64 %zmm1, %zmm0
@@ -299,13 +297,10 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; KNL-LABEL: test12:
; KNL: ## BB#0:
; KNL-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
-; KNL-NEXT: vpcmpgtq %zmm1, %zmm3, %k1
-; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kshiftlw $15, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: retq
@@ -313,13 +308,10 @@ define i64 @test12(<16 x i64>%a, <16 x i64>%b, i64 %a1, i64 %b1) {
; SKX-LABEL: test12:
; SKX: ## BB#0:
; SKX-NEXT: vpcmpgtq %zmm0, %zmm2, %k0
-; SKX-NEXT: vpcmpgtq %zmm1, %zmm3, %k1
-; SKX-NEXT: kunpckbw %k0, %k1, %k0
-; SKX-NEXT: kshiftlw $15, %k0, %k0
-; SKX-NEXT: kshiftrw $15, %k0, %k0
+; SKX-NEXT: kshiftlb $7, %k0, %k0
+; SKX-NEXT: kshiftrb $7, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
; SKX-NEXT: vzeroupper
@@ -335,13 +327,13 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
+; KNL-NEXT: movw $-4, %cx
+; KNL-NEXT: kmovw %ecx, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: movw $-4, %ax
; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: korw %k0, %k1, %k0
+; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; KNL-NEXT: retq
@@ -350,13 +342,13 @@ define i16 @test13(i32 %a, i32 %b) {
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
+; SKX-NEXT: movw $-4, %cx
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: kshiftlw $1, %k0, %k0
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: movw $-4, %ax
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
-; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kmovw %eax, %k1
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; SKX-NEXT: retq
@@ -373,8 +365,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; KNL-NEXT: kshiftlw $11, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: testb %al, %al
+; KNL-NEXT: testb $1, %al
; KNL-NEXT: cmoveq %rsi, %rdi
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: retq
@@ -385,8 +376,7 @@ define i64 @test14(<8 x i64>%a, <8 x i64>%b, i64 %a1, i64 %b1) {
; SKX-NEXT: kshiftlb $3, %k0, %k0
; SKX-NEXT: kshiftrb $7, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: testb %al, %al
+; SKX-NEXT: testb $1, %al
; SKX-NEXT: cmoveq %rsi, %rdi
; SKX-NEXT: movq %rdi, %rax
; SKX-NEXT: vzeroupper
@@ -424,14 +414,13 @@ define i16 @test15(i1 *%addr) {
define i16 @test16(i1 *%addr, i16 %a) {
; KNL-LABEL: test16:
; KNL: ## BB#0:
-; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kmovw %eax, %k2
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
-; KNL-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; KNL-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; KNL-NEXT: vpslld $31, %zmm2, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -440,14 +429,13 @@ define i16 @test16(i1 *%addr, i16 %a) {
;
; SKX-LABEL: test16:
; SKX: ## BB#0:
-; SKX-NEXT: movzbl (%rdi), %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: movb (%rdi), %al
+; SKX-NEXT: kmovd %esi, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2d %k1, %zmm0
; SKX-NEXT: vpmovm2d %k0, %zmm1
; SKX-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,1,2,3,4,5,6,7,8,9,16,11,12,13,14,15]
-; SKX-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovd2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
@@ -463,14 +451,13 @@ define i16 @test16(i1 *%addr, i16 %a) {
define i8 @test17(i1 *%addr, i8 %a) {
; KNL-LABEL: test17:
; KNL: ## BB#0:
-; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: kmovw %esi, %k2
+; KNL-NEXT: movb (%rdi), %al
+; KNL-NEXT: kmovw %esi, %k1
+; KNL-NEXT: kmovw %eax, %k2
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
-; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; KNL-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -479,14 +466,13 @@ define i8 @test17(i1 *%addr, i8 %a) {
;
; SKX-LABEL: test17:
; SKX: ## BB#0:
-; SKX-NEXT: movzbl (%rdi), %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kmovd %esi, %k1
+; SKX-NEXT: movb (%rdi), %al
+; SKX-NEXT: kmovd %esi, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2q %k1, %zmm0
; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,8,5,6,7]
-; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
+; SKX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1283,12 +1269,11 @@ define i32 @test_insertelement_v32i1(i32 %a, i32 %b, <32 x i32> %x , <32 x i32>
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k0
+; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k1
+; SKX-NEXT: kunpckwd %k0, %k1, %k0
+; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltud %zmm2, %zmm0, %k1
-; SKX-NEXT: vpcmpltud %zmm3, %zmm1, %k2
-; SKX-NEXT: kunpckwd %k1, %k2, %k1
-; SKX-NEXT: vpmovm2w %k1, %zmm0
; SKX-NEXT: vpmovm2w %k0, %zmm1
; SKX-NEXT: vmovdqu16 {{.*#+}} zmm2 = [0,1,2,3,32,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31]
; SKX-NEXT: vpermi2w %zmm1, %zmm0, %zmm2
@@ -1308,33 +1293,29 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
-; KNL-NEXT: vpextrd $1, %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
-; KNL-NEXT: vmovd %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k2} {z}
+; KNL-NEXT: vpextrb $4, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; KNL-NEXT: vpextrb $0, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,8,2,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm2, %zmm3
; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
-; KNL-NEXT: vptestmq %zmm1, %zmm1, %k2
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
+; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm2, %zmm2, %zmm2 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,8,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
; KNL-NEXT: vpsllq $63, %zmm3, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
-; KNL-NEXT: vpextrd $3, %xmm0, %eax
-; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: vpextrb $12, %xmm0, %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,8,4,5,6,7]
@@ -1349,10 +1330,9 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
+; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltud %xmm1, %xmm0, %k1
-; SKX-NEXT: vpmovm2d %k1, %xmm0
; SKX-NEXT: vpmovm2d %k0, %xmm1
; SKX-NEXT: vpbroadcastq %xmm1, %xmm1
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
@@ -1373,16 +1353,14 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; KNL: ## BB#0:
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm0
-; KNL-NEXT: vmovq %xmm0, %rax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k2
-; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k2} {z}
+; KNL-NEXT: vpextrb $0, %xmm0, %ecx
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,8,2,3,4,5,6,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
@@ -1396,13 +1374,12 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; SKX: ## BB#0:
; SKX-NEXT: cmpl %esi, %edi
; SKX-NEXT: setb %al
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k1
+; SKX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: kshiftlw $1, %k1, %k1
-; SKX-NEXT: kshiftrw $1, %k1, %k1
; SKX-NEXT: kshiftlw $1, %k0, %k0
-; SKX-NEXT: korw %k0, %k1, %k0
+; SKX-NEXT: kshiftrw $1, %k0, %k0
+; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: retq
@@ -1422,8 +1399,10 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpextrb $0, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v2i1:
@@ -1432,11 +1411,10 @@ define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
%t2 = extractelement <2 x i1> %t1, i32 0
@@ -1452,8 +1430,10 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
; KNL-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; KNL-NEXT: vpextrb $0, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v2i1_alt:
@@ -1462,11 +1442,10 @@ define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; SKX-NEXT: kshiftlw $15, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
%t2 = extractelement <2 x i1> %t1, i32 0
@@ -1535,8 +1514,10 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpextrb $15, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_extractelement_v64i1:
@@ -1544,11 +1525,10 @@ define zeroext i8 @test_extractelement_v64i1(<64 x i8> %a, <64 x i8> %b) {
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <64 x i8> %a, %b
@@ -1566,8 +1546,10 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; KNL-NEXT: vextracti128 $1, %ymm0, %xmm0
; KNL-NEXT: vpextrb $15, %xmm0, %eax
-; KNL-NEXT: addb $4, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: andb $1, %al
+; KNL-NEXT: movb $4, %cl
+; KNL-NEXT: subb %al, %cl
+; KNL-NEXT: movzbl %cl, %eax
; KNL-NEXT: retq
;
; SKX-LABEL: extractelement_v64i1_alt:
@@ -1575,11 +1557,10 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: kshiftrq $63, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: cmpb $1, %al
-; SKX-NEXT: movb $3, %al
-; SKX-NEXT: adcb $0, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: andb $1, %al
+; SKX-NEXT: movb $4, %cl
+; SKX-NEXT: subb %al, %cl
+; SKX-NEXT: movzbl %cl, %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%t1 = icmp ugt <64 x i8> %a, %b
@@ -2332,7 +2313,7 @@ define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b,
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; SKX-NEXT: andl $1, %edi
-; SKX-NEXT: movl -24(%rsp,%rdi,8), %eax
+; SKX-NEXT: movzbl -24(%rsp,%rdi,8), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <2 x i64> %a, %b
@@ -2362,7 +2343,7 @@ define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b,
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; SKX-NEXT: andl $3, %edi
-; SKX-NEXT: movl -24(%rsp,%rdi,4), %eax
+; SKX-NEXT: movzbl -24(%rsp,%rdi,4), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: retq
%t1 = icmp ugt <4 x i32> %a, %b
@@ -2391,7 +2372,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 %zmm0, (%rsp)
; KNL-NEXT: andl $7, %edi
-; KNL-NEXT: movl (%rsp,%rdi,8), %eax
+; KNL-NEXT: movzbl (%rsp,%rdi,8), %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
@@ -2414,7 +2395,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
; SKX-NEXT: andl $7, %edi
-; SKX-NEXT: movl (%rsp,%rdi,8), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,8), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
@@ -2444,7 +2425,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa32 %zmm0, (%rsp)
; KNL-NEXT: andl $15, %edi
-; KNL-NEXT: movl (%rsp,%rdi,4), %eax
+; KNL-NEXT: movzbl (%rsp,%rdi,4), %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
@@ -2467,7 +2448,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
; SKX-NEXT: andl $15, %edi
-; SKX-NEXT: movl (%rsp,%rdi,4), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,4), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
@@ -2500,9 +2481,8 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; KNL-NEXT: vmovdqa %ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
; KNL-NEXT: movq %rsp, %rax
-; KNL-NEXT: movb (%rdi,%rax), %al
-; KNL-NEXT: andb $1, %al
-; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: movzbl (%rdi,%rax), %eax
+; KNL-NEXT: andl $1, %eax
; KNL-NEXT: movq %rbp, %rsp
; KNL-NEXT: popq %rbp
; KNL-NEXT: retq
@@ -2524,7 +2504,7 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: vmovdqu16 %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
-; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
+; SKX-NEXT: movzbl (%rsp,%rdi,2), %eax
; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
diff --git a/test/CodeGen/X86/avx512-insert-extract_i1.ll b/test/CodeGen/X86/avx512-insert-extract_i1.ll
index a1d1a7dae1900..a099b80898ee3 100644
--- a/test/CodeGen/X86/avx512-insert-extract_i1.ll
+++ b/test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -22,9 +22,8 @@ define zeroext i8 @test_extractelement_varible_v64i1(<64 x i8> %a, <64 x i8> %b,
; SKX-NEXT: vmovdqu8 %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movq %rsp, %rax
-; SKX-NEXT: movb (%rdi,%rax), %al
-; SKX-NEXT: andb $1, %al
-; SKX-NEXT: movzbl %al, %eax
+; SKX-NEXT: movzbl (%rdi,%rax), %eax
+; SKX-NEXT: andl $1, %eax
; SKX-NEXT: movq %rbp, %rsp
; SKX-NEXT: popq %rbp
; SKX-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 56962ca2671d2..32da0a70218e3 100644
--- a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <16 x float> @test_x86_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x float>
; CHECK-NEXT: vbroadcastss %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -30,8 +30,8 @@ define <8 x double> @test_x86_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x double
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vaddpd %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vaddpd %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
@@ -51,8 +51,8 @@ define <16 x i32>@test_int_x86_avx512_pbroadcastd_512(<4 x i32> %x0, <16 x i32>
; CHECK-NEXT: vpbroadcastd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpbroadcastd %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpaddd %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vpbroadcastd %xmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %x0, <16 x i32> %x1, i16 -1)
@@ -71,8 +71,8 @@ define <8 x i64>@test_int_x86_avx512_pbroadcastq_512(<2 x i64> %x0, <8 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpbroadcastq %xmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpaddq %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vpbroadcastq %xmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64> %x0, <8 x i64> %x1,i8 -1)
@@ -91,8 +91,8 @@ define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16
; CHECK-NEXT: vmovsldup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
-; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2)
@@ -111,8 +111,8 @@ define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16
; CHECK-NEXT: vmovshdup {{.*#+}} zmm2 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
-; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2)
@@ -131,8 +131,8 @@ define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x
; CHECK-NEXT: vmovddup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
-; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
+; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double> %x0, <8 x double> %x1, i8 %x2)
@@ -671,9 +671,9 @@ define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i6
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
-; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
@@ -1616,9 +1616,9 @@ define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x
; CHECK-NEXT: vshufpd {{.*#+}} zmm3 = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
-; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
-; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
+; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 %x4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 -1)
@@ -2031,8 +2031,8 @@ define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> %x2, i8 %x3)
@@ -2051,8 +2051,8 @@ define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1,
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> %x2, i16 %x3)
@@ -2651,8 +2651,8 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15]
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
-; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
+; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3>, <16 x float> %x2, i16 %x3)
@@ -2881,23 +2881,23 @@ define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8
; CHECK-LABEL: test_mask_vextractf32x4:
; CHECK: ## BB#0:
; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm1
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kshiftlw $12, %k1, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kshiftlw $13, %k1, %k2
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kshiftlw $12, %k0, %k1
+; CHECK-NEXT: kshiftrw $15, %k1, %k1
+; CHECK-NEXT: kshiftlw $13, %k0, %k2
; CHECK-NEXT: kshiftrw $15, %k2, %k2
-; CHECK-NEXT: kshiftlw $15, %k1, %k3
+; CHECK-NEXT: kshiftlw $15, %k0, %k3
; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kshiftlw $14, %k1, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kshiftlw $14, %k0, %k0
+; CHECK-NEXT: kshiftrw $15, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: kmovw %k3, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm2
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
; CHECK-NEXT: kmovw %k2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
; CHECK-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
@@ -2911,23 +2911,23 @@ define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask)
; CHECK-LABEL: test_mask_vextracti64x4:
; CHECK: ## BB#0:
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm1
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kshiftlw $12, %k1, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kshiftlw $13, %k1, %k2
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kshiftlw $12, %k0, %k1
+; CHECK-NEXT: kshiftrw $15, %k1, %k1
+; CHECK-NEXT: kshiftlw $13, %k0, %k2
; CHECK-NEXT: kshiftrw $15, %k2, %k2
-; CHECK-NEXT: kshiftlw $15, %k1, %k3
+; CHECK-NEXT: kshiftlw $15, %k0, %k3
; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kshiftlw $14, %k1, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kshiftlw $14, %k0, %k0
+; CHECK-NEXT: kshiftrw $15, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: kmovw %k3, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm2
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
; CHECK-NEXT: kmovw %k2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
; CHECK-NEXT: vpslld $31, %xmm2, %xmm2
; CHECK-NEXT: vpmovsxdq %xmm2, %ymm2
; CHECK-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
@@ -2942,23 +2942,23 @@ define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: test_maskz_vextracti32x4:
; CHECK: ## BB#0:
; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm0
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: kshiftlw $12, %k1, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: kshiftlw $13, %k1, %k2
+; CHECK-NEXT: kmovw %edi, %k0
+; CHECK-NEXT: kshiftlw $12, %k0, %k1
+; CHECK-NEXT: kshiftrw $15, %k1, %k1
+; CHECK-NEXT: kshiftlw $13, %k0, %k2
; CHECK-NEXT: kshiftrw $15, %k2, %k2
-; CHECK-NEXT: kshiftlw $15, %k1, %k3
+; CHECK-NEXT: kshiftlw $15, %k0, %k3
; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kshiftlw $14, %k1, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: kshiftlw $14, %k0, %k0
+; CHECK-NEXT: kshiftrw $15, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: kmovw %k3, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm1
-; CHECK-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
+; CHECK-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
; CHECK-NEXT: kmovw %k2, %eax
-; CHECK-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; CHECK-NEXT: kmovw %k1, %eax
+; CHECK-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
; CHECK-NEXT: vpslld $31, %xmm1, %xmm1
; CHECK-NEXT: vpsrad $31, %xmm1, %xmm1
; CHECK-NEXT: vpand %xmm0, %xmm1, %xmm0
@@ -2989,9 +2989,9 @@ define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vaddps %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 %x4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 -1)
@@ -3010,9 +3010,9 @@ define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 %x4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 -1)
@@ -3030,9 +3030,9 @@ define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vaddpd %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 %x4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 -1)
@@ -3050,9 +3050,9 @@ define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i6
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
+; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm1
+; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 %x4)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 -1)
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index f800d01064ba9..563cad04b8c2d 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -112,6 +112,8 @@ define i16 @unpckbw_test(i16 %a0, i16 %a1) {
}
declare i16 @llvm.x86.avx512.kxnor.w(i16, i16) nounwind readnone
+; TODO: the two kxnor instructions here are a no-op and should be eliminated,
+; probably by FoldConstantArithmetic in SelectionDAG.
define i16 @test_kxnor(i16 %a0, i16 %a1) {
; CHECK-LABEL: test_kxnor:
; CHECK: ## BB#0:
@@ -121,6 +123,8 @@ define i16 @test_kxnor(i16 %a0, i16 %a1) {
; CHECK-NEXT: kmovw %eax, %k2
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
+; CHECK-NEXT: kxnorw %k0, %k0, %k1
+; CHECK-NEXT: kxnorw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -269,16 +273,15 @@ declare <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>, <4 x float>, <4 x
define <4 x float> @test_sqrt_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_sqrt_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vsqrtss %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vsqrtss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm2
-; CHECK-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; CHECK-NEXT: vsqrtss {ru-sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
; CHECK-NEXT: vsqrtss {rz-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm1
+; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res0 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.sqrt.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 1)
@@ -296,16 +299,15 @@ declare <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>, <2 x double>, <
define <2 x double> @test_sqrt_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_sqrt_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
; CHECK-NEXT: vsqrtsd %xmm1, %xmm0, %xmm3 {%k1}
; CHECK-NEXT: vsqrtsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm2
-; CHECK-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
+; CHECK-NEXT: vsqrtsd {ru-sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
; CHECK-NEXT: vsqrtsd {rz-sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm1
+; CHECK-NEXT: vaddpd %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res0 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.sqrt.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 1)
@@ -477,11 +479,11 @@ declare i64 @llvm.x86.avx512.cvttss2usi64(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtsd2usi64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2usi64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2usi %xmm0, %rax
-; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT: addq %rax, %rcx
-; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtsd2usi %xmm0, %rcx
+; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %rdx
; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double> %a0, i32 4)
@@ -496,11 +498,11 @@ declare i64 @llvm.x86.avx512.vcvtsd2usi64(<2 x double>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtsd2si64(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2si64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2si %xmm0, %rax
-; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT: addq %rax, %rcx
-; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtsd2si %xmm0, %rcx
+; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %rdx
; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double> %a0, i32 4)
@@ -515,11 +517,11 @@ declare i64 @llvm.x86.avx512.vcvtsd2si64(<2 x double>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtss2usi64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2usi64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2usi %xmm0, %rax
-; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT: addq %rax, %rcx
-; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtss2usi %xmm0, %rcx
+; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %rdx
; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float> %a0, i32 4)
@@ -534,11 +536,11 @@ declare i64 @llvm.x86.avx512.vcvtss2usi64(<4 x float>, i32) nounwind readnone
define i64 @test_x86_avx512_cvtss2si64(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2si64:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2si %xmm0, %rax
-; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %rcx
-; CHECK-NEXT: addq %rax, %rcx
-; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtss2si %xmm0, %rcx
+; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %rax
+; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %rdx
; CHECK-NEXT: addq %rcx, %rax
+; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%res = call i64 @llvm.x86.avx512.vcvtss2si64(<4 x float> %a0, i32 4)
@@ -553,11 +555,11 @@ declare i64 @llvm.x86.avx512.vcvtss2si64(<4 x float>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtsd2usi32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2usi32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2usi %xmm0, %eax
-; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %ecx
-; CHECK-NEXT: addl %eax, %ecx
-; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtsd2usi %xmm0, %ecx
+; CHECK-NEXT: vcvtsd2usi {rz-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtsd2usi {rd-sae}, %xmm0, %edx
; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double> %a0, i32 4)
@@ -572,11 +574,11 @@ declare i32 @llvm.x86.avx512.vcvtsd2usi32(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtsd2si32(<2 x double> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtsd2si32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtsd2si %xmm0, %eax
-; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %ecx
-; CHECK-NEXT: addl %eax, %ecx
-; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtsd2si %xmm0, %ecx
+; CHECK-NEXT: vcvtsd2si {rz-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtsd2si {rd-sae}, %xmm0, %edx
; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double> %a0, i32 4)
@@ -591,11 +593,11 @@ declare i32 @llvm.x86.avx512.vcvtsd2si32(<2 x double>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtss2usi32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2usi32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2usi %xmm0, %eax
-; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %ecx
-; CHECK-NEXT: addl %eax, %ecx
-; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtss2usi %xmm0, %ecx
+; CHECK-NEXT: vcvtss2usi {rz-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtss2usi {rd-sae}, %xmm0, %edx
; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float> %a0, i32 4)
@@ -610,11 +612,11 @@ declare i32 @llvm.x86.avx512.vcvtss2usi32(<4 x float>, i32) nounwind readnone
define i32 @test_x86_avx512_cvtss2si32(<4 x float> %a0) {
; CHECK-LABEL: test_x86_avx512_cvtss2si32:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcvtss2si %xmm0, %eax
-; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %ecx
-; CHECK-NEXT: addl %eax, %ecx
-; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtss2si %xmm0, %ecx
+; CHECK-NEXT: vcvtss2si {rz-sae}, %xmm0, %eax
+; CHECK-NEXT: vcvtss2si {rd-sae}, %xmm0, %edx
; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %a0, i32 4)
@@ -683,9 +685,8 @@ define <16 x i16> @test_x86_vcvtps2ph_256(<16 x float> %a0, <16 x i16> %src, i16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm1 {%k1}
; CHECK-NEXT: vcvtps2ph $2, %zmm0, %ymm2 {%k1} {z}
-; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1
; CHECK-NEXT: vcvtps2ph $2, %zmm0, (%rsi)
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm0
; CHECK-NEXT: retq
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 -1)
%res2 = call <16 x i16> @llvm.x86.avx512.mask.vcvtps2ph.512(<16 x float> %a0, i32 2, <16 x i16> zeroinitializer, i16 %mask)
@@ -2215,7 +2216,6 @@ declare <4 x float> @llvm.x86.avx512.mask.add.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2227,7 +2227,6 @@ define <4 x float> @test_mask_add_ss_rn(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2239,7 +2238,6 @@ define <4 x float> @test_mask_add_ss_rd(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_ru:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2251,7 +2249,6 @@ define <4 x float> @test_mask_add_ss_ru(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_rz:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2263,7 +2260,6 @@ define <4 x float> @test_mask_add_ss_rz(<4 x float> %a0, <4 x float> %a1, <4 x f
define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_current:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2275,7 +2271,6 @@ define <4 x float> @test_mask_add_ss_current(<4 x float> %a0, <4 x float> %a1, <
define <4 x float> @test_maskz_add_ss_rn(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddss {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2295,7 +2290,6 @@ define <4 x float> @test_add_ss_rn(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_ss_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -2312,7 +2306,6 @@ define <4 x float> @test_mask_add_ss_current_memfold(<4 x float> %a0, float* %a1
define <4 x float> @test_maskz_add_ss_current_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2330,7 +2323,6 @@ declare <2 x double> @llvm.x86.avx512.mask.add.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2342,7 +2334,6 @@ define <2 x double> @test_mask_add_sd_rn(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rd-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2354,7 +2345,6 @@ define <2 x double> @test_mask_add_sd_rd(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_ru:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {ru-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2366,7 +2356,6 @@ define <2 x double> @test_mask_add_sd_ru(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_rz:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2378,7 +2367,6 @@ define <2 x double> @test_mask_add_sd_rz(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_current:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2390,7 +2378,6 @@ define <2 x double> @test_mask_add_sd_current(<2 x double> %a0, <2 x double> %a1
define <2 x double> @test_maskz_add_sd_rn(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_sd_rn:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vaddsd {rn-sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2410,7 +2397,6 @@ define <2 x double> @test_add_sd_rn(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_add_sd_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -2425,7 +2411,6 @@ define <2 x double> @test_mask_add_sd_current_memfold(<2 x double> %a0, double*
define <2 x double> @test_maskz_add_sd_current_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_add_sd_current_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddsd (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2441,7 +2426,6 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2453,7 +2437,6 @@ define <4 x float> @test_mask_max_ss_sae(<4 x float> %a0, <4 x float> %a1, <4 x
define <4 x float> @test_maskz_max_ss_sae(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2473,7 +2456,6 @@ define <4 x float> @test_max_ss_sae(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovaps %xmm2, %xmm0
@@ -2485,7 +2467,6 @@ define <4 x float> @test_mask_max_ss(<4 x float> %a0, <4 x float> %a1, <4 x floa
define <4 x float> @test_maskz_max_ss(<4 x float> %a0, <4 x float> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxss %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2505,7 +2486,6 @@ define <4 x float> @test_max_ss(<4 x float> %a0, <4 x float> %a1) {
define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -2522,7 +2502,6 @@ define <4 x float> @test_mask_max_ss_memfold(<4 x float> %a0, float* %a1, <4 x f
define <4 x float> @test_maskz_max_ss_memfold(<4 x float> %a0, float* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_ss_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2539,7 +2518,6 @@ declare <2 x double> @llvm.x86.avx512.mask.max.sd.round(<2 x double>, <2 x doubl
define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2551,7 +2529,6 @@ define <2 x double> @test_mask_max_sd_sae(<2 x double> %a0, <2 x double> %a1, <2
define <2 x double> @test_maskz_max_sd_sae(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd_sae:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2571,7 +2548,6 @@ define <2 x double> @test_max_sd_sae(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vmovapd %xmm2, %xmm0
@@ -2583,7 +2559,6 @@ define <2 x double> @test_mask_max_sd(<2 x double> %a0, <2 x double> %a1, <2 x d
define <2 x double> @test_maskz_max_sd(<2 x double> %a0, <2 x double> %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmaxsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -2603,7 +2578,6 @@ define <2 x double> @test_max_sd(<2 x double> %a0, <2 x double> %a1) {
define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_mask_max_sd_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovapd %xmm1, %xmm0
@@ -2618,7 +2592,6 @@ define <2 x double> @test_mask_max_sd_memfold(<2 x double> %a0, double* %a1, <2
define <2 x double> @test_maskz_max_sd_memfold(<2 x double> %a0, double* %a1, i8 %mask) {
; CHECK-LABEL: test_maskz_max_sd_memfold:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxsd (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -3652,16 +3625,15 @@ declare <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>, <4 x float>, <4
define <4 x float> @test_getexp_ss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; CHECK-LABEL: test_getexp_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vgetexpss %xmm1, %xmm0, %xmm3 {%k1}
+; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm4 {%k1} {z}
+; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm5
; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm2
-; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm3 {%k1} {z}
-; CHECK-NEXT: vgetexpss {sae}, %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm5, %xmm4, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.getexp.ss(<4 x float>%a0, <4 x float> %a1, <4 x float> %a2, i8 %mask, i32 8)
@@ -3679,16 +3651,15 @@ declare <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>, <2 x double>,
define <2 x double> @test_getexp_sd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; CHECK-LABEL: test_getexp_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
+; CHECK-NEXT: vgetexpsd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm5 {%k1} {z}
; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vaddpd %xmm2, %xmm3, %xmm2
-; CHECK-NEXT: vgetexpsd {sae}, %xmm1, %xmm0, %xmm0 {%k1} {z}
-; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm4, %xmm0
+; CHECK-NEXT: vaddpd %xmm3, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res0 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.getexp.sd(<2 x double>%a0, <2 x double> %a1, <2 x double> %a2, i8 %mask, i32 8)
@@ -3706,11 +3677,9 @@ declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32
define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3721,18 +3690,18 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8
define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_sd_all:
; CHECK: ## BB#0:
+; CHECK-NEXT: vcmplesd %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vcmpunordsd {sae}, %xmm1, %xmm0, %k0
-; CHECK-NEXT: vcmplesd %xmm1, %xmm0, %k1
-; CHECK-NEXT: korw %k0, %k1, %k0
-; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k1
-; CHECK-NEXT: vcmpneqsd %xmm1, %xmm0, %k2
-; CHECK-NEXT: korw %k1, %k2, %k1
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k2
-; CHECK-NEXT: kandw %k2, %k1, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, %edx
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vcmpneqsd %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %esi
+; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
+; CHECK-NEXT: orb %cl, %dl
+; CHECK-NEXT: orb %sil, %al
+; CHECK-NEXT: orb %dl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3752,11 +3721,9 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3768,17 +3735,17 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %
define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1, i8 %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss_all:
; CHECK: ## BB#0:
-; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k1
-; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %ecx
+; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0
+; CHECK-NEXT: kmovw %k0, %edx
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k2 {%k1}
-; CHECK-NEXT: kmovw %k2, %ecx
-; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k1 {%k1}
-; CHECK-NEXT: kmovw %k1, %edx
-; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k0 {%k1}
+; CHECK-NEXT: kmovw %k0, %esi
+; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andb %cl, %al
+; CHECK-NEXT: andb %cl, %dl
+; CHECK-NEXT: andb %sil, %al
; CHECK-NEXT: andb %dl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -3899,16 +3866,15 @@ declare <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask_getmant_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
-; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm3
-; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vmovapd %xmm2, %xmm4
+; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vgetmantsd $11, %xmm1, %xmm0, %xmm5 {%k1} {z}
; CHECK-NEXT: vgetmantsd $11, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm5, %xmm4, %xmm0
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> %x2, i8 %x3, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.getmant.sd(<2 x double> %x0, <2 x double> %x1, i32 11, <2 x double> zeroinitializer, i8 %x3, i32 4)
@@ -3925,15 +3891,14 @@ declare <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float>, <4 x float>, i
define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_getmant_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm2 {%k1}
-; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3 {%k1} {z}
-; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
-; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm4 {%k1} {z}
; CHECK-NEXT: vgetmantss $11, {sae}, %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm1
; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> %x2, i8 %x3, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> zeroinitializer, i8 %x3, i32 4)
@@ -4057,7 +4022,6 @@ declare <2 x double> @llvm.x86.avx512.mask.cvtss2sd.round(<2 x double>, <4 x flo
define <2 x double>@test_int_x86_avx512_mask_cvt_ss2sd_round(<2 x double> %x0,<4 x float> %x1, <2 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_ss2sd_round:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtss2sd %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vcvtss2sd {sae}, %xmm1, %xmm0, %xmm0
@@ -4074,7 +4038,6 @@ declare <4 x float> @llvm.x86.avx512.mask.cvtsd2ss.round(<4 x float>, <2 x doubl
define <4 x float>@test_int_x86_avx512_mask_cvt_sd2ss_round(<4 x float> %x0,<2 x double> %x1, <4 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_sd2ss_round:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtsd2ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vcvtsd2ss {rn-sae}, %xmm1, %xmm0, %xmm0
@@ -4435,8 +4398,8 @@ define <16 x i32>@test_int_x86_avx512_mask_prol_d_512(<16 x i32> %x0, i32 %x1, <
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprold $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vprold $3, %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vprold $3, %zmm0, %zmm0
+; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.prol.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
@@ -4455,8 +4418,8 @@ define <8 x i64>@test_int_x86_avx512_mask_prol_q_512(<8 x i64> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vprolq $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vprolq $3, %zmm0, %zmm2 {%k1} {z}
-; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vprolq $3, %zmm0, %zmm0
+; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.prol.q.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
@@ -4557,9 +4520,9 @@ define <8 x double>@test_int_x86_avx512_mask_fixupimm_pd_512(<8 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $4, %zmm2, %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vfixupimmpd $5, %zmm2, %zmm1, %zmm4 {%k1} {z}
-; CHECK-NEXT: vaddpd %zmm4, %zmm3, %zmm3
; CHECK-NEXT: vfixupimmpd $3, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vaddpd %zmm0, %zmm3, %zmm0
+; CHECK-NEXT: vaddpd %zmm4, %zmm3, %zmm1
+; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 4, i8 %x4, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.fixupimm.pd.512(<8 x double> zeroinitializer, <8 x double> %x1, <8 x i64> %x2, i32 5, i8 %x4, i32 4)
@@ -4580,9 +4543,9 @@ define <8 x double>@test_int_x86_avx512_maskz_fixupimm_pd_512(<8 x double> %x0,
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vmovapd %zmm0, %zmm5
; CHECK-NEXT: vfixupimmpd $5, %zmm4, %zmm1, %zmm5 {%k1} {z}
-; CHECK-NEXT: vaddpd %zmm5, %zmm3, %zmm3
; CHECK-NEXT: vfixupimmpd $2, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vaddpd %zmm0, %zmm3, %zmm0
+; CHECK-NEXT: vaddpd %zmm5, %zmm3, %zmm1
+; CHECK-NEXT: vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> %x2, i32 3, i8 %x4, i32 4)
%res1 = call <8 x double> @llvm.x86.avx512.maskz.fixupimm.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x i64> zeroinitializer, i32 5, i8 %x4, i32 4)
@@ -4597,16 +4560,15 @@ declare <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vxorps %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vmovaps %xmm0, %xmm5
; CHECK-NEXT: vfixupimmss $5, %xmm4, %xmm1, %xmm5 {%k1}
-; CHECK-NEXT: vaddps %xmm5, %xmm3, %xmm3
; CHECK-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm5, %xmm3, %xmm1
+; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 4)
@@ -4621,16 +4583,15 @@ declare <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_maskz_fixupimm_ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
+; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vfixupimmss $5, %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vfixupimmss $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vaddps %xmm3, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.maskz.fixupimm.ss(<4 x float> %x0, <4 x float> %x1, <4 x i32> zeroinitializer, i32 5, i8 %x4, i32 8)
@@ -4651,9 +4612,9 @@ define <16 x float>@test_int_x86_avx512_mask_fixupimm_ps_512(<16 x float> %x0, <
; CHECK-NEXT: vpxord %zmm4, %zmm4, %zmm4
; CHECK-NEXT: vmovaps %zmm0, %zmm5
; CHECK-NEXT: vfixupimmps $5, %zmm4, %zmm1, %zmm5 {%k1}
-; CHECK-NEXT: vaddps %zmm5, %zmm3, %zmm3
; CHECK-NEXT: vfixupimmps $5, {sae}, %zmm2, %zmm1, %zmm0
-; CHECK-NEXT: vaddps %zmm0, %zmm3, %zmm0
+; CHECK-NEXT: vaddps %zmm5, %zmm3, %zmm1
+; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> %x2, i32 5, i16 %x4, i32 4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.fixupimm.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x i32> zeroinitializer, i32 5, i16 %x4, i32 4)
@@ -4691,16 +4652,15 @@ declare <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_fixupimm_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
-; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1}
+; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm4
-; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4
+; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
-; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm4, %xmm0
+; CHECK-NEXT: vaddpd %xmm3, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
@@ -4715,16 +4675,15 @@ declare <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double>, <2 x doubl
define <2 x double>@test_int_x86_avx512_maskz_fixupimm_sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_fixupimm_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfixupimmsd $5, %xmm2, %xmm1, %xmm3 {%k1} {z}
; CHECK-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; CHECK-NEXT: vmovapd %xmm0, %xmm5
; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm4, %xmm1, %xmm5 {%k1} {z}
-; CHECK-NEXT: vaddpd %xmm5, %xmm3, %xmm3
; CHECK-NEXT: vfixupimmsd $5, {sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm5, %xmm3, %xmm1
+; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> %x2, i32 5, i8 %x4, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.maskz.fixupimm.sd(<2 x double> %x0, <2 x double> %x1, <2 x i64> zeroinitializer, i32 5, i8 %x4, i32 8)
@@ -4816,18 +4775,17 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
-; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK-NEXT: vmovapd %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm4 {%k1}
-; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddpd %xmm4, %xmm0, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm4 {%k1}
+; CHECK-NEXT: vmovapd %xmm0, %xmm5
+; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm5
+; CHECK-NEXT: vfmadd213sd {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm1
+; CHECK-NEXT: vaddpd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -4844,18 +4802,17 @@ declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm3
-; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm0, %xmm4
-; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm4 {%k1}
-; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0
-; CHECK-NEXT: vaddps %xmm4, %xmm0, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm0, %xmm5
+; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm5
+; CHECK-NEXT: vfmadd213ss {rz-sae}, %xmm2, %xmm1, %xmm0 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm1
+; CHECK-NEXT: vaddps %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -4872,7 +4829,6 @@ declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_maskz_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm0, %xmm3
; CHECK-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm3 {%k1} {z}
@@ -4890,7 +4846,6 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -4904,18 +4859,17 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmadd_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmadd231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
+; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmadd231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -4932,18 +4886,17 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmadd231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmadd231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -4959,7 +4912,6 @@ define void @fmadd_ss_mask_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_mask_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1}
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -4987,7 +4939,6 @@ define void @fmadd_ss_maskz_memfold(float* %a, float* %b, i8 %c) {
; CHECK-LABEL: fmadd_ss_maskz_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132ss (%rsi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vmovss %xmm0, (%rdi)
@@ -5015,7 +4966,6 @@ define void @fmadd_sd_mask_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_mask_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1}
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -5039,7 +4989,6 @@ define void @fmadd_sd_maskz_memfold(double* %a, double* %b, i8 %c) {
; CHECK-LABEL: fmadd_sd_maskz_memfold:
; CHECK: ## BB#0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %edx, %k1
; CHECK-NEXT: vfmadd132sd (%rsi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vmovlpd %xmm0, (%rdi)
@@ -5064,18 +5013,17 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>
define <2 x double>@test_int_x86_avx512_mask3_vfmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
+; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -5092,18 +5040,17 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -5120,18 +5067,17 @@ declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double>, <2 x double
define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm3
-; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddpd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddpd %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfnmsub231sd %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovapd %xmm2, %xmm5
+; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfnmsub231sd {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddpd %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1, i32 4)
%res1 = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3, i32 4)
@@ -5148,18 +5094,17 @@ declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float>, <4 x float>,
define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3,i32 %x4 ){
; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm3
-; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3 {%k1}
-; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm4
-; CHECK-NEXT: vaddps %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %xmm2, %xmm4
-; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm4 {%k1}
-; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2
-; CHECK-NEXT: vaddps %xmm4, %xmm2, %xmm0
-; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; CHECK-NEXT: vfnmsub231ss %xmm1, %xmm0, %xmm4 {%k1}
+; CHECK-NEXT: vmovaps %xmm2, %xmm5
+; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm5
+; CHECK-NEXT: vfnmsub231ss {rz-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; CHECK-NEXT: vaddps %xmm4, %xmm3, %xmm0
+; CHECK-NEXT: vaddps %xmm2, %xmm5, %xmm1
+; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1, i32 4)
%res1 = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3, i32 4)
@@ -5174,7 +5119,6 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ss(<4 x float> %x0, <4 x fl
define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1, float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd231ss (%rdi), %xmm0, %xmm1 {%k1}
; CHECK-NEXT: vmovaps %xmm1, %xmm0
@@ -5188,7 +5132,6 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ss_rm(<4 x float> %x0, <4 x
define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd132ss (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -5202,7 +5145,8 @@ define <4 x float>@test_int_x86_avx512_mask_vfmadd_ss_rm(<4 x float> %x0, <4 x f
define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ss_rm(<4 x float> %x0, <4 x float> %x1,float *%ptr_b ,i8 %x3,i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ss_rm:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss (%rdi), %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%q = load float, float* %ptr_b
diff --git a/test/CodeGen/X86/avx512-load-store.ll b/test/CodeGen/X86/avx512-load-store.ll
index 3295c66c6d420..4fd985bf24cdf 100644
--- a/test/CodeGen/X86/avx512-load-store.ll
+++ b/test/CodeGen/X86/avx512-load-store.ll
@@ -12,7 +12,7 @@ define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x
; CHECK32-LABEL: test_mm_mask_move_ss:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovss %xmm2, %xmm0, %xmm0 {%k1}
; CHECK32-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
@@ -37,7 +37,7 @@ define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4
; CHECK32-LABEL: test_mm_maskz_move_ss:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK32-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
@@ -62,7 +62,7 @@ define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2
; CHECK32-LABEL: test_mm_mask_move_sd:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vmovsd %xmm2, %xmm0, %xmm0 {%k1}
; CHECK32-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
@@ -87,7 +87,7 @@ define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <
; CHECK32-LABEL: test_mm_maskz_move_sd:
; CHECK32: # BB#0: # %entry
; CHECK32-NEXT: movb {{[0-9]+}}(%esp), %al
-; CHECK32-NEXT: andl $1, %eax
+; CHECK32-NEXT: andb $1, %al
; CHECK32-NEXT: kmovw %eax, %k1
; CHECK32-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; CHECK32-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
diff --git a/test/CodeGen/X86/avx512-mask-bugfix.ll b/test/CodeGen/X86/avx512-mask-bugfix.ll
deleted file mode 100755
index 1940680f1c108..0000000000000
--- a/test/CodeGen/X86/avx512-mask-bugfix.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s
-
-; ModuleID = 'foo.ll'
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-; Function Attrs: nounwind readnone
-declare i32 @llvm.x86.avx.movmsk.ps.256(<8 x float>) #0
-
-; Function Attrs: nounwind readnone
-declare i64 @llvm.cttz.i64(i64, i1) #0
-
-; Function Attrs: nounwind
-define void @foo(float* noalias %aFOO, float %b, i32 %a) {
-allocas:
- %full_mask_memory.i57 = alloca <8 x float>
- %return_value_memory.i60 = alloca i1
- %cmp.i = icmp eq i32 %a, 65535
- br i1 %cmp.i, label %all_on, label %some_on
-
-all_on:
- %mask0 = load <8 x float>, <8 x float>* %full_mask_memory.i57
- %v0.i.i.i70 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) #0
- %allon.i.i76 = icmp eq i32 %v0.i.i.i70, 65535
- br i1 %allon.i.i76, label %check_neighbors.i.i121, label %domixed.i.i100
-
-domixed.i.i100:
- br label %check_neighbors.i.i121
-
-check_neighbors.i.i121:
- %v1.i5.i.i116 = call i32 @llvm.x86.avx.movmsk.ps.256(<8 x float> %mask0) #0
- %alleq.i.i120 = icmp eq i32 %v1.i5.i.i116, 65535
- br i1 %alleq.i.i120, label %all_equal.i.i123, label %not_all_equal.i.i124
-
-; CHECK: kxnorw %k0, %k0, %k0
-; CHECK: kshiftrw $15, %k0, %k0
-; CHECK: jmp
-; CHECK: kxorw %k0, %k0, %k0
-
-all_equal.i.i123:
- br label %reduce_equal___vyi.exit128
-
-not_all_equal.i.i124:
- br label %reduce_equal___vyi.exit128
-
-reduce_equal___vyi.exit128:
- %calltmp2.i125 = phi i1 [ true, %all_equal.i.i123 ], [ false, %not_all_equal.i.i124 ]
- store i1 %calltmp2.i125, i1* %return_value_memory.i60
- %return_value.i126 = load i1, i1* %return_value_memory.i60
- %. = select i1 %return_value.i126, i32 1, i32 0
- %select_to_float = sitofp i32 %. to float
- ret void
-
-some_on:
- ret void
-}
-
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index 7103efe050a49..01153a9e45f77 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -418,7 +418,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: kshiftlw $10, %k0, %k0
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: andl $1, %eax
+; KNL-NEXT: andb $1, %al
; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; KNL-NEXT: retq
;
@@ -428,7 +428,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: kshiftlw $10, %k0, %k0
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: andl $1, %eax
+; SKX-NEXT: andb $1, %al
; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -439,7 +439,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: kshiftlw $10, %k0, %k0
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: andl $1, %eax
+; AVX512BW-NEXT: andb $1, %al
; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -450,7 +450,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512DQ-NEXT: kshiftlw $10, %k0, %k0
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: andl $1, %eax
+; AVX512DQ-NEXT: andb $1, %al
; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
@@ -965,8 +965,8 @@ define <64 x i8> @test16(i64 %x) {
; SKX-LABEL: test16:
; SKX: ## BB#0:
; SKX-NEXT: kmovq %rdi, %k0
-; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: movb $1, %al
+; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
; SKX-NEXT: vpmovm2b %k0, %zmm1
@@ -981,8 +981,8 @@ define <64 x i8> @test16(i64 %x) {
; AVX512BW-LABEL: test16:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: kmovq %rdi, %k0
-; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
+; AVX512BW-NEXT: movb $1, %al
+; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm1
@@ -1085,7 +1085,6 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; SKX-NEXT: kmovq %rdi, %k0
; SKX-NEXT: cmpl %edx, %esi
; SKX-NEXT: setg %al
-; SKX-NEXT: andl $1, %eax
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpmovm2b %k1, %zmm0
; SKX-NEXT: vpsllq $40, %xmm0, %xmm0
@@ -1103,7 +1102,6 @@ define <64 x i8> @test17(i64 %x, i32 %y, i32 %z) {
; AVX512BW-NEXT: kmovq %rdi, %k0
; AVX512BW-NEXT: cmpl %edx, %esi
; AVX512BW-NEXT: setg %al
-; AVX512BW-NEXT: andl $1, %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpmovm2b %k1, %zmm0
; AVX512BW-NEXT: vpsllq $40, %xmm0, %xmm0
@@ -1166,21 +1164,25 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-LABEL: test18:
; KNL: ## BB#0:
; KNL-NEXT: kmovw %edi, %k1
-; KNL-NEXT: kmovw %esi, %k2
-; KNL-NEXT: kshiftlw $7, %k2, %k0
-; KNL-NEXT: kshiftrw $15, %k0, %k0
-; KNL-NEXT: kshiftlw $6, %k2, %k2
+; KNL-NEXT: kmovw %esi, %k0
+; KNL-NEXT: kshiftlw $7, %k0, %k2
; KNL-NEXT: kshiftrw $15, %k2, %k2
+; KNL-NEXT: kmovw %k2, %eax
+; KNL-NEXT: kshiftlw $6, %k0, %k0
+; KNL-NEXT: kshiftrw $15, %k0, %k0
+; KNL-NEXT: kmovw %k0, %ecx
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; KNL-NEXT: kmovw %ecx, %k1
+; KNL-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; KNL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; KNL-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
-; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
-; KNL-NEXT: kshiftlw $1, %k1, %k1
-; KNL-NEXT: kshiftrw $1, %k1, %k1
-; KNL-NEXT: kshiftlw $7, %k0, %k0
-; KNL-NEXT: korw %k0, %k1, %k1
+; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
+; KNL-NEXT: kshiftlw $1, %k0, %k0
+; KNL-NEXT: kshiftrw $1, %k0, %k0
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: kshiftlw $7, %k1, %k1
+; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqw %zmm0, %xmm0
; KNL-NEXT: retq
@@ -1191,16 +1193,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kshiftlw $7, %k1, %k2
; SKX-NEXT: kshiftrw $15, %k2, %k2
+; SKX-NEXT: kmovd %k2, %eax
; SKX-NEXT: kshiftlw $6, %k1, %k1
; SKX-NEXT: kshiftrw $15, %k1, %k1
+; SKX-NEXT: kmovd %k1, %ecx
; SKX-NEXT: vpmovm2q %k0, %zmm0
-; SKX-NEXT: vpmovm2q %k1, %zmm1
+; SKX-NEXT: kmovd %ecx, %k0
+; SKX-NEXT: vpmovm2q %k0, %zmm1
; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kshiftlb $1, %k0, %k0
; SKX-NEXT: kshiftrb $1, %k0, %k0
-; SKX-NEXT: kshiftlb $7, %k2, %k1
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: kshiftlb $7, %k1, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vzeroupper
@@ -1209,21 +1215,25 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-LABEL: test18:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: kmovd %edi, %k1
-; AVX512BW-NEXT: kmovd %esi, %k2
-; AVX512BW-NEXT: kshiftlw $7, %k2, %k0
-; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
-; AVX512BW-NEXT: kshiftlw $6, %k2, %k2
+; AVX512BW-NEXT: kmovd %esi, %k0
+; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
+; AVX512BW-NEXT: kmovd %k2, %eax
+; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
+; AVX512BW-NEXT: kmovd %k0, %ecx
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k2} {z}
+; AVX512BW-NEXT: kmovd %ecx, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm1, %zmm1, %zmm1 {%k1} {z}
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512BW-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512BW-NEXT: vpsllq $63, %zmm2, %zmm0
-; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k1
-; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
-; AVX512BW-NEXT: kshiftrw $1, %k1, %k1
-; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
-; AVX512BW-NEXT: korw %k0, %k1, %k0
+; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
+; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
+; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
+; AVX512BW-NEXT: korw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512BW-NEXT: vzeroupper
@@ -1235,16 +1245,20 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
+; AVX512DQ-NEXT: kmovw %k2, %eax
; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
+; AVX512DQ-NEXT: kmovw %k1, %ecx
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
+; AVX512DQ-NEXT: kmovw %ecx, %k0
+; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
-; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
+; AVX512DQ-NEXT: kmovw %eax, %k1
+; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k1, %k0, %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
@@ -1383,10 +1397,8 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
; KNL-LABEL: store_v1i1:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %edi
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kxnorw %k0, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: movb %al, (%rsi)
@@ -1394,20 +1406,16 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
;
; SKX-LABEL: store_v1i1:
; SKX: ## BB#0:
-; SKX-NEXT: andl $1, %edi
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovb %k0, (%rsi)
; SKX-NEXT: retq
;
; AVX512BW-LABEL: store_v1i1:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: andl $1, %edi
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movb %al, (%rsi)
@@ -1415,10 +1423,8 @@ define void @store_v1i1(<1 x i1> %c , <1 x i1>* %ptr) {
;
; AVX512DQ-LABEL: store_v1i1:
; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: andl $1, %edi
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
-; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rsi)
; AVX512DQ-NEXT: retq
@@ -1613,59 +1619,14 @@ define void @store_v16i1(<16 x i1> %c , <16 x i1>* %ptr) {
@f1.v = internal unnamed_addr global i1 false, align 4
define void @f1(i32 %c) {
-; KNL-LABEL: f1:
-; KNL: ## BB#0: ## %entry
-; KNL-NEXT: movzbl {{.*}}(%rip), %edi
-; KNL-NEXT: movl %edi, %eax
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k0
-; KNL-NEXT: kxnorw %k0, %k0, %k1
-; KNL-NEXT: kshiftrw $15, %k1, %k1
-; KNL-NEXT: kxorw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: movb %al, {{.*}}(%rip)
-; KNL-NEXT: xorl $1, %edi
-; KNL-NEXT: jmp _f2 ## TAILCALL
-;
-; SKX-LABEL: f1:
-; SKX: ## BB#0: ## %entry
-; SKX-NEXT: movzbl {{.*}}(%rip), %edi
-; SKX-NEXT: movl %edi, %eax
-; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: kmovd %eax, %k0
-; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $15, %k1, %k1
-; SKX-NEXT: kxorw %k1, %k0, %k0
-; SKX-NEXT: kmovb %k0, {{.*}}(%rip)
-; SKX-NEXT: xorl $1, %edi
-; SKX-NEXT: jmp _f2 ## TAILCALL
-;
-; AVX512BW-LABEL: f1:
-; AVX512BW: ## BB#0: ## %entry
-; AVX512BW-NEXT: movzbl {{.*}}(%rip), %edi
-; AVX512BW-NEXT: movl %edi, %eax
-; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: kmovd %eax, %k0
-; AVX512BW-NEXT: kxnorw %k0, %k0, %k1
-; AVX512BW-NEXT: kshiftrw $15, %k1, %k1
-; AVX512BW-NEXT: kxorw %k1, %k0, %k0
-; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: movb %al, {{.*}}(%rip)
-; AVX512BW-NEXT: xorl $1, %edi
-; AVX512BW-NEXT: jmp _f2 ## TAILCALL
-;
-; AVX512DQ-LABEL: f1:
-; AVX512DQ: ## BB#0: ## %entry
-; AVX512DQ-NEXT: movzbl {{.*}}(%rip), %edi
-; AVX512DQ-NEXT: movl %edi, %eax
-; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: kmovw %eax, %k0
-; AVX512DQ-NEXT: kxnorw %k0, %k0, %k1
-; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
-; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
-; AVX512DQ-NEXT: kmovb %k0, {{.*}}(%rip)
-; AVX512DQ-NEXT: xorl $1, %edi
-; AVX512DQ-NEXT: jmp _f2 ## TAILCALL
+; CHECK-LABEL: f1:
+; CHECK: ## BB#0: ## %entry
+; CHECK-NEXT: movzbl {{.*}}(%rip), %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: movb %al, {{.*}}(%rip)
+; CHECK-NEXT: xorl $1, %edi
+; CHECK-NEXT: jmp _f2 ## TAILCALL
entry:
%.b1 = load i1, i1* @f1.v, align 4
%not..b1 = xor i1 %.b1, true
diff --git a/test/CodeGen/X86/avx512-mask-spills.ll b/test/CodeGen/X86/avx512-mask-spills.ll
index 96aefdb105845..4ef88ac495c32 100644
--- a/test/CodeGen/X86/avx512-mask-spills.ll
+++ b/test/CodeGen/X86/avx512-mask-spills.ll
@@ -9,11 +9,13 @@ define <4 x i1> @test_4i1(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: Lcfi0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
-; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
+; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: vpmovm2d %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -32,12 +34,14 @@ define <8 x i1> @test_8i1(<8 x i32> %a, <8 x i32> %b) {
; CHECK-NEXT: Lcfi1:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
-; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
-; CHECK-NEXT: korb %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
+; CHECK-NEXT: korb %k1, %k0, %k0
; CHECK-NEXT: vpmovm2w %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -56,12 +60,14 @@ define <16 x i1> @test_16i1(<16 x i32> %a, <16 x i32> %b) {
; CHECK-NEXT: Lcfi2:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
-; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
-; CHECK-NEXT: korw %k1, %k0, %k0
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k1 ## 2-byte Reload
+; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %xmm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -79,12 +85,14 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
; CHECK-NEXT: Lcfi3:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: vpcmpnleuw %zmm1, %zmm0, %k0
-; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k1
-; CHECK-NEXT: kord %k1, %k0, %k0
; CHECK-NEXT: kmovd %k0, {{[0-9]+}}(%rsp) ## 4-byte Spill
+; CHECK-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovd %k0, (%rsp) ## 4-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
; CHECK-NEXT: kmovd {{[0-9]+}}(%rsp), %k0 ## 4-byte Reload
+; CHECK-NEXT: kmovd (%rsp), %k1 ## 4-byte Reload
+; CHECK-NEXT: kord %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %ymm0
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -98,18 +106,20 @@ define <32 x i1> @test_32i1(<32 x i16> %a, <32 x i16> %b) {
define <64 x i1> @test_64i1(<64 x i8> %a, <64 x i8> %b) {
; CHECK-LABEL: test_64i1:
; CHECK: ## BB#0:
-; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: subq $24, %rsp
; CHECK-NEXT: Lcfi4:
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
-; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k1
-; CHECK-NEXT: korq %k1, %k0, %k0
-; CHECK-NEXT: kmovq %k0, (%rsp) ## 8-byte Spill
+; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
+; CHECK-NEXT: vpcmpgtb %zmm1, %zmm0, %k0
+; CHECK-NEXT: kmovq %k0, {{[0-9]+}}(%rsp) ## 8-byte Spill
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq _f
-; CHECK-NEXT: kmovq (%rsp), %k0 ## 8-byte Reload
+; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k0 ## 8-byte Reload
+; CHECK-NEXT: kmovq {{[0-9]+}}(%rsp), %k1 ## 8-byte Reload
+; CHECK-NEXT: korq %k1, %k0, %k0
; CHECK-NEXT: vpmovm2b %k0, %zmm0
-; CHECK-NEXT: popq %rax
+; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: retq
%cmp_res = icmp ugt <64 x i8> %a, %b
diff --git a/test/CodeGen/X86/avx512-memfold.ll b/test/CodeGen/X86/avx512-memfold.ll
index d754b2b78f6ca..17cb30255f75a 100644
--- a/test/CodeGen/X86/avx512-memfold.ll
+++ b/test/CodeGen/X86/avx512-memfold.ll
@@ -4,11 +4,9 @@
define i8 @test_int_x86_avx512_mask_cmp_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_cmp_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
%b.val = load float, float* %b
@@ -24,7 +22,6 @@ declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)
define <4 x float> @test_mask_max_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_mask_max_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vmaxss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -41,7 +38,6 @@ declare <4 x float> @llvm.x86.avx512.mask.max.ss.round(<4 x float>, <4 x float>,
define <4 x float> @test_maskz_add_ss(<4 x float> %a, float* %b, i8 %mask) {
; CHECK-LABEL: test_maskz_add_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vaddss (%rdi), %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -61,7 +57,6 @@ declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>,
define <2 x double> @test_int_x86_avx512_mask_vfmadd_sd(<2 x double> %a, <2 x double> %b, double* %c, i8 %mask){
; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vfmadd213sd (%rdi), %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-regcall-NoMask.ll b/test/CodeGen/X86/avx512-regcall-NoMask.ll
index 334097917853b..f43d5b3e11dd9 100644
--- a/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -1,16 +1,10 @@
-; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=X32 %s
-; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=WIN64 %s
+; RUN: llc < %s -mtriple=i386-pc-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=ALL --check-prefix=X32 %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=ALL --check-prefix=WIN64 %s
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -mattr=+avx512bw -mattr=+avx512dq | FileCheck --check-prefix=LINUXOSX64 %s
-; X32-LABEL: test_argReti1:
-; X32: kmov{{.*}} %eax, %k{{[0-7]}}
-; X32: kmov{{.*}} %k{{[0-7]}}, %eax
-; X32: ret{{.*}}
-
-; WIN64-LABEL: test_argReti1:
-; WIN64: kmov{{.*}} %eax, %k{{[0-7]}}
-; WIN64: kmov{{.*}} %k{{[0-7]}}, %eax
-; WIN64: ret{{.*}}
+; ALL-LABEL: test_argReti1:
+; ALL: incb %al
+; ALL: ret{{.*}}
; Test regcall when receiving/returning i1
define x86_regcallcc i1 @test_argReti1(i1 %a) {
@@ -18,17 +12,11 @@ define x86_regcallcc i1 @test_argReti1(i1 %a) {
ret i1 %add
}
-; X32-LABEL: test_CallargReti1:
-; X32: kmov{{.*}} %k{{[0-7]}}, %eax
-; X32: call{{.*}} {{.*}}test_argReti1
-; X32: kmov{{.*}} %eax, %k{{[0-7]}}
-; X32: ret{{.*}}
-
-; WIN64-LABEL: test_CallargReti1:
-; WIN64: kmov{{.*}} %k{{[0-7]}}, %eax
-; WIN64: call{{.*}} {{.*}}test_argReti1
-; WIN64: kmov{{.*}} %eax, %k{{[0-7]}}
-; WIN64: ret{{.*}}
+; ALL-LABEL: test_CallargReti1:
+; ALL: movzbl %al, %eax
+; ALL: call{{.*}}test_argReti1
+; ALL: incb %al
+; ALL: ret{{.*}}
; Test regcall when passing/retrieving i1
define x86_regcallcc i1 @test_CallargReti1(i1 %a) {
diff --git a/test/CodeGen/X86/avx512-scalar_mask.ll b/test/CodeGen/X86/avx512-scalar_mask.ll
index 47c6813fa8dce..f6ee8ff4c0f65 100644
--- a/test/CodeGen/X86/avx512-scalar_mask.ll
+++ b/test/CodeGen/X86/avx512-scalar_mask.ll
@@ -7,7 +7,6 @@ declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <
define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -18,7 +17,6 @@ define <4 x float>@test_var_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %
define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2, i8 %mask) {
; CHECK-LABEL: test_var_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
@@ -30,7 +28,8 @@ define <4 x float>@test_var_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float>
define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
@@ -41,7 +40,8 @@ define <4 x float>@test_const0_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const0_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 0, i32 4)
@@ -52,7 +52,8 @@ define <4 x float>@test_const0_maskz(<4 x float> %v0, <4 x float> %v1, <4 x floa
define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_mask:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: movb $2, %al
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
@@ -63,7 +64,8 @@ define <4 x float>@test_const2_mask(<4 x float> %v0, <4 x float> %v1, <4 x float
define <4 x float>@test_const2_maskz(<4 x float> %v0, <4 x float> %v1, <4 x float> %v2) {
; CHECK-LABEL: test_const2_maskz:
; CHECK: ## BB#0:
-; CHECK-NEXT: kxorw %k0, %k0, %k1
+; CHECK-NEXT: movb $2, %al
+; CHECK-NEXT: kmovw %eax, %k1
; CHECK-NEXT: vfmadd213ss %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %v0,<4 x float> %v1, <4 x float> %v2, i8 2, i32 4)
diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll
index 1859b1bcfaf6b..e81f983d9fe68 100644
--- a/test/CodeGen/X86/avx512-select.ll
+++ b/test/CodeGen/X86/avx512-select.ll
@@ -161,7 +161,7 @@ define i64 @pr30249() {
define double @pr30561_f64(double %b, double %a, i1 %c) {
; CHECK-LABEL: pr30561_f64:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
; CHECK-NEXT: retq
@@ -172,7 +172,7 @@ define double @pr30561_f64(double %b, double %a, i1 %c) {
define float @pr30561_f32(float %b, float %a, i1 %c) {
; CHECK-LABEL: pr30561_f32:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: andb $1, %dil
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index faa055dfbbf3f..9b4e73a18fc28 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -796,9 +796,9 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_w_512:
@@ -806,9 +806,9 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_w_512(<32 x i16> %x0, <8 x i16>
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm3
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm2 {%k1}
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vpsrlw %xmm1, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.psrl.w.512(<32 x i16> %x0, <8 x i16> %x1, <32 x i16> %x2, i32 -1)
@@ -826,8 +826,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
-; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
@@ -836,8 +836,8 @@ define <32 x i16>@test_int_x86_avx512_mask_psrl_wi_512(<32 x i16> %x0, i32 %x1,
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm2
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm1 {%k1}
-; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512F-32-NEXT: vpsrlw $3, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: vpaddw %zmm2, %zmm1, %zmm1
; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.psrl.wi.512(<32 x i16> %x0, i32 3, <32 x i16> %x2, i32 %x3)
diff --git a/test/CodeGen/X86/avx512bw-intrinsics.ll b/test/CodeGen/X86/avx512bw-intrinsics.ll
index 13b850ccc3b6b..3337f42eb1428 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -2159,9 +2159,9 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_dbpsadbw_512:
@@ -2169,9 +2169,9 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm2 {%k1}
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm3 {%k1} {z}
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vdbpsadbw $2, %zmm1, %zmm0, %zmm0
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> %x3, i32 %x4)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.dbpsadbw.512(<64 x i8> %x0, <64 x i8> %x1, i32 2, <32 x i16> zeroinitializer, i32 %x4)
@@ -2411,9 +2411,9 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
; AVX512BW-NEXT: kmovd %edi, %k1
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
-; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512BW-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512F-32-LABEL: test_int_x86_avx512_mask_permvar_hi_512:
@@ -2421,9 +2421,9 @@ define <32 x i16>@test_int_x86_avx512_mask_permvar_hi_512(<32 x i16> %x0, <32 x
; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm2 {%k1}
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm3 {%k1} {z}
-; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm2
; AVX512F-32-NEXT: vpermw %zmm0, %zmm1, %zmm0
-; AVX512F-32-NEXT: vpaddw %zmm0, %zmm2, %zmm0
+; AVX512F-32-NEXT: vpaddw %zmm3, %zmm2, %zmm1
+; AVX512F-32-NEXT: vpaddw %zmm0, %zmm1, %zmm0
; AVX512F-32-NEXT: retl
%res = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.permvar.hi.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> zeroinitializer, i32 %x3)
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 571f345d4616b..7df07b0413ed4 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <32 x i8>@test_int_x86_avx512_pbroadcastb_256(<16 x i8> %x0, <32 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x78,0xc8]
-; CHECK-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc9]
; CHECK-NEXT: vpbroadcastb %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x78,0xc0]
+; CHECK-NEXT: vpaddb %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfc,0xc9]
; CHECK-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx512.pbroadcastb.256(<16 x i8> %x0, <32 x i8> %x1, i32 -1)
@@ -29,8 +29,8 @@ define <16 x i8>@test_int_x86_avx512_pbroadcastb_128(<16 x i8> %x0, <16 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x78,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x78,0xc8]
-; CHECK-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc9]
; CHECK-NEXT: vpbroadcastb %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x78,0xc0]
+; CHECK-NEXT: vpaddb %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfc,0xc9]
; CHECK-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.avx512.pbroadcastb.128(<16 x i8> %x0, <16 x i8> %x1, i16 -1)
@@ -49,8 +49,8 @@ define <16 x i16>@test_int_x86_avx512_pbroadcastw_256(<8 x i16> %x0, <16 x i16>
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x79,0xc8]
-; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc9]
; CHECK-NEXT: vpbroadcastw %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x79,0xc0]
+; CHECK-NEXT: vpaddw %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc9]
; CHECK-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.pbroadcastw.256(<8 x i16> %x0, <16 x i16> %x1, i16 -1)
@@ -69,8 +69,8 @@ define <8 x i16>@test_int_x86_avx512_pbroadcastw_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x79,0xc8]
-; CHECK-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc9]
; CHECK-NEXT: vpbroadcastw %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x79,0xc0]
+; CHECK-NEXT: vpaddw %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xc9]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.pbroadcastw.128(<8 x i16> %x0, <8 x i16> %x1, i8 -1)
@@ -89,8 +89,8 @@ define <64 x i8>@test_int_x86_avx512_pbroadcastb_512(<16 x i8> %x0, <64 x i8> %x
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x78,0xd0]
; CHECK-NEXT: kmovq %rdi, %k1 ## encoding: [0xc4,0xe1,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x78,0xc8]
-; CHECK-NEXT: vpaddb %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc9]
; CHECK-NEXT: vpbroadcastb %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x78,0xc0]
+; CHECK-NEXT: vpaddb %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfc,0xc9]
; CHECK-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfc,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <64 x i8> @llvm.x86.avx512.pbroadcastb.512(<16 x i8> %x0, <64 x i8> %x1, i64 -1)
@@ -109,8 +109,8 @@ define <32 x i16>@test_int_x86_avx512_pbroadcastw_512(<8 x i16> %x0, <32 x i16>
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm2 ## encoding: [0x62,0xf2,0x7d,0x48,0x79,0xd0]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x49,0x79,0xc8]
-; CHECK-NEXT: vpaddw %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc9]
; CHECK-NEXT: vpbroadcastw %xmm0, %zmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xc9,0x79,0xc0]
+; CHECK-NEXT: vpaddw %zmm1, %zmm2, %zmm1 ## encoding: [0x62,0xf1,0x6d,0x48,0xfd,0xc9]
; CHECK-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ## encoding: [0x62,0xf1,0x7d,0x48,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <32 x i16> @llvm.x86.avx512.pbroadcastw.512(<8 x i16> %x0, <32 x i16> %x1, i32 -1)
@@ -1476,9 +1476,9 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_w_128(<8 x i16> %x0, <8 x i16> %x
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd1,0xd1]
-; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xd3]
; CHECK-NEXT: vpsrlw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd1,0xc1]
-; CHECK-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc2]
+; CHECK-NEXT: vpaddw %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfd,0xcb]
+; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psrl.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.psrl.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 -1)
@@ -1496,9 +1496,9 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_w_256(<16 x i16> %x0, <8 x i16>
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd1,0xd9]
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd1,0xd1]
-; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xd3]
; CHECK-NEXT: vpsrlw %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd1,0xc1]
-; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xc0]
+; CHECK-NEXT: vpaddw %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfd,0xcb]
+; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psrl.w.256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.psrl.w.256(<16 x i16> %x0, <8 x i16> %x1, <16 x i16> %x2, i16 -1)
@@ -1596,8 +1596,8 @@ define <8 x i16>@test_int_x86_avx512_mask_psrl_wi_128(<8 x i16> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x71,0xd0,0x03]
-; CHECK-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
; CHECK-NEXT: vpsrlw $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x71,0xd0,0x03]
+; CHECK-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfd,0xca]
; CHECK-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfd,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.mask.psrl.wi.128(<8 x i16> %x0, i32 3, <8 x i16> %x2, i8 %x3)
@@ -1616,8 +1616,8 @@ define <16 x i16>@test_int_x86_avx512_mask_psrl_wi_256(<16 x i16> %x0, i32 %x1,
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x71,0xd0,0x03]
; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x71,0xd0,0x03]
-; CHECK-NEXT: vpaddw %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xca]
; CHECK-NEXT: vpsrlw $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x71,0xd0,0x03]
+; CHECK-NEXT: vpaddw %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xca]
; CHECK-NEXT: vpaddw %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfd,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.mask.psrl.wi.256(<16 x i16> %x0, i32 3, <16 x i16> %x2, i16 %x3)
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
index f8f47c87100ad..8f528394f5bd5 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics-upgrade.ll
@@ -9,8 +9,8 @@ define <4 x i32>@test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32>
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.mask.lzcnt.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
diff --git a/test/CodeGen/X86/avx512cdvl-intrinsics.ll b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
index 96254f7c95b0f..37aea45e6107d 100644
--- a/test/CodeGen/X86/avx512cdvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512cdvl-intrinsics.ll
@@ -7,8 +7,8 @@ define <4 x i32> @test_int_x86_avx512_mask_vplzcnt_d_128(<4 x i32> %x0, <4 x i32
; CHECK-NEXT: vplzcntd %xmm0, %xmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vplzcntd %xmm0, %xmm1 {%k1}
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vplzcntd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
%1 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %x0, i1 false)
diff --git a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index 1377733739fe2..cf79819734a2d 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -13,10 +13,9 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_512(<8 x double> %x0,
; CHECK-NEXT: kshiftlb $6, %k0, %k0
; CHECK-NEXT: kshiftrb $7, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: vmovq %rax, %xmm2
-; CHECK-NEXT: kmovw %k1, %eax
-; CHECK-NEXT: vmovq %rax, %xmm3
-; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; CHECK-NEXT: kmovw %k1, %ecx
+; CHECK-NEXT: vmovd %ecx, %xmm2
+; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
; CHECK-NEXT: vpsllq $63, %xmm2, %xmm2
; CHECK-NEXT: vpsraq $63, %zmm2, %zmm2
; CHECK-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
@@ -40,8 +39,8 @@ define <8 x float>@test_int_x86_avx512_mask_vextractf32x8(<16 x float> %x0, <8 x
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1
; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx512.mask.vextractf32x8.512(<16 x float> %x0,i32 1, <8 x float> %x2, i8 %x3)
diff --git a/test/CodeGen/X86/avx512dq-intrinsics.ll b/test/CodeGen/X86/avx512dq-intrinsics.ll
index 97ac0fde10ec0..06ee237593e79 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -262,7 +262,6 @@ declare <4 x float> @llvm.x86.avx512.mask.reduce.ss(<4 x float>, <4 x float>,<4
define <4 x float>@test_int_x86_avx512_mask_reduce_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducess $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducess $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -279,7 +278,6 @@ declare <4 x float> @llvm.x86.avx512.mask.range.ss(<4 x float>, <4 x float>,<4 x
define <4 x float>@test_int_x86_avx512_mask_range_ss(<4 x float> %x0, <4 x float> %x1, <4 x float> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vrangess $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -296,7 +294,6 @@ declare <2 x double> @llvm.x86.avx512.mask.reduce.sd(<2 x double>, <2 x double>,
define <2 x double>@test_int_x86_avx512_mask_reduce_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_reduce_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vreducesd $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vreducesd $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -313,7 +310,6 @@ declare <2 x double> @llvm.x86.avx512.mask.range.sd(<2 x double>, <2 x double>,<
define <2 x double>@test_int_x86_avx512_mask_range_sd(<2 x double> %x0, <2 x double> %x1, <2 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_range_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vrangesd $4, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vrangesd $4, {sae}, %xmm1, %xmm0, %xmm0
@@ -367,14 +363,11 @@ declare i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_sd(<2 x double> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_sd:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclasssd $2, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
-; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -389,14 +382,11 @@ declare i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float>, i32, i8)
define i8 @test_int_x86_avx512_mask_fpclass_ss(<4 x float> %x0, i8 %x1) {
; CHECK-LABEL: test_int_x86_avx512_mask_fpclass_ss:
; CHECK: ## BB#0:
-; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %ecx
-; CHECK-NEXT: andl $1, %ecx
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
; CHECK-NEXT: retq
@@ -414,8 +404,8 @@ define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0,
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x2.512(<4 x float> %x0, <16 x float> %x2, i16 %x3)
@@ -434,8 +424,8 @@ define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1]
+; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x2.512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3)
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index 595b3e0ebb863..52a84deebf519 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -1568,8 +1568,8 @@ define <2 x double>@test_int_x86_avx512_mask_vextractf64x2_256(<4 x double> %x0,
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x19,0xc2,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x19,0xc1,0x01]
-; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x19,0xc0,0x01]
+; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.vextractf64x2.256(<4 x double> %x0,i32 1, <2 x double> %x2, i8 %x3)
@@ -1588,9 +1588,9 @@ define <4 x double>@test_int_x86_avx512_mask_insertf64x2_256(<4 x double> %x0, <
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xd1,0x01]
-; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xd3]
; CHECK-NEXT: vinsertf64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x18,0xc1,0x01]
-; CHECK-NEXT: vaddpd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc2]
+; CHECK-NEXT: vaddpd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xcb]
+; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double> %x0, <2 x double> %x1, i32 1, <4 x double> %x3, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.mask.insertf64x2.256(<4 x double> %x0, <2 x double> %x1, i32 1, <4 x double> %x3, i8 -1)
@@ -1608,9 +1608,9 @@ define <4 x i64>@test_int_x86_avx512_mask_inserti64x2_256(<4 x i64> %x0, <2 x i6
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xd1,0x01]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xd3]
; CHECK-NEXT: vinserti64x2 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0xfd,0xa9,0x38,0xc1,0x01]
-; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
+; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64> %x0, <2 x i64> %x1, i32 1, <4 x i64> %x3, i8 %x4)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.inserti64x2.256(<4 x i64> %x0, <2 x i64> %x1, i32 1, <4 x i64> %x3, i8 -1)
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics.ll b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
index 1bfdfd0e634de..ad9ea93c20311 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -635,8 +635,8 @@ define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0,
; CHECK-NEXT: ## ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1]
; CHECK-NEXT: vbroadcastf32x2 %xmm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x19,0xd0]
; CHECK-NEXT: ## ymm2 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xc0]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.broadcastf32x2.256(<4 x float> %x0, <8 x float> %x2, i8 %x3)
@@ -680,8 +680,8 @@ define <4 x i32>@test_int_x86_avx512_mask_broadcasti32x2_128(<4 x i32> %x0, <4 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x59,0xc8]
; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x59,0xd0]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xc0]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.broadcasti32x2.128(<4 x i32> %x0, <4 x i32> %x2, i8 %x3)
diff --git a/test/CodeGen/X86/avx512er-intrinsics.ll b/test/CodeGen/X86/avx512er-intrinsics.ll
index b8531e25bfa15..0e4922f37bbb9 100644
--- a/test/CodeGen/X86/avx512er-intrinsics.ll
+++ b/test/CodeGen/X86/avx512er-intrinsics.ll
@@ -121,7 +121,6 @@ declare <4 x float> @llvm.x86.avx512.rcp28.ss(<4 x float>, <4 x float>, <4 x flo
define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -132,7 +131,6 @@ define <4 x float> @test_rsqrt28_ss_maskz(<4 x float> %a0, i8 %mask) {
define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x float> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_ss_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28ss {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovaps %xmm2, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc2]
@@ -144,7 +142,6 @@ define <4 x float> @test_rsqrt28_ss_mask(<4 x float> %a0, <4 x float> %b0, <4 x
define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm0, %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x99,0xcd,0xc0]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -155,7 +152,6 @@ define <2 x double> @test_rsqrt28_sd_maskz(<2 x double> %a0, i8 %mask) {
define <2 x double> @test_rsqrt28_sd_mask(<2 x double> %a0, <2 x double> %b0, <2 x double> %c0, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_mask:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi # encoding: [0x83,0xe7,0x01]
; CHECK-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vrsqrt28sd {sae}, %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x19,0xcd,0xd1]
; CHECK-NEXT: vmovapd %xmm2, %xmm0 # encoding: [0xc5,0xf9,0x28,0xc2]
@@ -169,7 +165,6 @@ declare <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double>, <2 x double>, <2
define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd (%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x07]
; CHECK-NEXT: retq # encoding: [0xc3]
@@ -182,7 +177,6 @@ define <2 x double> @test_rsqrt28_sd_maskz_mem(<2 x double> %a0, double* %ptr, i
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr, i8 %mask) {
; CHECK-LABEL: test_rsqrt28_sd_maskz_mem_offset:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %esi # encoding: [0x83,0xe6,0x01]
; CHECK-NEXT: kmovw %esi, %k1 # encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
; CHECK-NEXT: retq # encoding: [0xc3]
diff --git a/test/CodeGen/X86/avx512ifma-intrinsics.ll b/test/CodeGen/X86/avx512ifma-intrinsics.ll
index 9659dc6d455af..30ecc0d2e49e5 100644
--- a/test/CodeGen/X86/avx512ifma-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifma-intrinsics.ll
@@ -13,8 +13,8 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_512(<8 x i64> %x0, <8 x i
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -41,8 +41,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_512(<8 x i64> %x0, <8 x
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52huq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -69,8 +69,8 @@ define <8 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_512(<8 x i64> %x0, <8 x i
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
@@ -97,8 +97,8 @@ define <8 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_512(<8 x i64> %x0, <8 x
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpxord %zmm2, %zmm2, %zmm2
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpmadd52luq %zmm2, %zmm1, %zmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm3, %zmm1
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
index b2fe6eba88aba..3ca686cef3bf4 100644
--- a/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512ifmavl-intrinsics.ll
@@ -14,8 +14,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_128(<2 x i64> %x0, <2 x i
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -42,8 +42,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52h_uq_256(<4 x i64> %x0, <4 x i
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -70,8 +70,8 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_128(<2 x i64> %x0, <2 x
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52huq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -98,8 +98,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52h_uq_256(<4 x i64> %x0, <4 x
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52huq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -126,8 +126,8 @@ define <2 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_128(<2 x i64> %x0, <2 x i
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -154,8 +154,8 @@ define <4 x i64>@test_int_x86_avx512_mask_vpmadd52l_uq_256(<4 x i64> %x0, <4 x i
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
@@ -182,8 +182,8 @@ define <2 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_128(<2 x i64> %x0, <2 x
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm4 {%k1} {z}
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpmadd52luq %xmm2, %xmm1, %xmm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm2, %xmm3, %xmm1
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; CHECK-NEXT: retq
@@ -210,8 +210,8 @@ define <4 x i64>@test_int_x86_avx512_maskz_vpmadd52l_uq_256(<4 x i64> %x0, <4 x
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm4 {%k1} {z}
; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm0 {%k1} {z}
-; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpmadd52luq %ymm2, %ymm1, %ymm2 {%k1} {z}
+; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm2, %ymm3, %ymm1
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index c2d8df6476b3e..4d906a4fd29a2 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -30,8 +30,8 @@ define <4 x i32>@test_int_x86_avx512_pbroadcastd_128(<4 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x58,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x58,0xc8]
-; CHECK-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc9]
; CHECK-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x58,0xc0]
+; CHECK-NEXT: vpaddd %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc9]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.pbroadcastd.128(<4 x i32> %x0, <4 x i32> %x1, i8 -1)
@@ -50,8 +50,8 @@ define <4 x i64>@test_int_x86_avx512_pbroadcastq_256(<2 x i64> %x0, <4 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x59,0xc8]
-; CHECK-NEXT: vpaddq %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc9]
; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x59,0xc0]
+; CHECK-NEXT: vpaddq %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc9]
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.pbroadcastq.256(<2 x i64> %x0, <4 x i64> %x1,i8 -1)
@@ -70,8 +70,8 @@ define <2 x i64>@test_int_x86_avx512_pbroadcastq_128(<2 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x59,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x59,0xc8]
-; CHECK-NEXT: vpaddq %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc9]
; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0x89,0x59,0xc0]
+; CHECK-NEXT: vpaddq %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc9]
; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.pbroadcastq.128(<2 x i64> %x0, <2 x i64> %x1,i8 -1)
@@ -90,8 +90,8 @@ define <4 x double> @test_x86_vbroadcast_sd_pd_256(<2 x double> %a0, <4 x double
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x19,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x19,0xc8]
-; CHECK-NEXT: vaddpd %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc9]
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0xfd,0xa9,0x19,0xc0]
+; CHECK-NEXT: vaddpd %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc9]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.256(<2 x double> %a0, <4 x double> zeroinitializer, i8 -1)
@@ -110,8 +110,8 @@ define <8 x float> @test_x86_vbroadcast_ss_ps_256(<4 x float> %a0, <8 x float> %
; CHECK-NEXT: vbroadcastss %xmm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x29,0x18,0xc8]
-; CHECK-NEXT: vaddps %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc9]
; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0xa9,0x18,0xc0]
+; CHECK-NEXT: vaddps %ymm1, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc9]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.256(<4 x float> %a0, <8 x float> zeroinitializer, i8 -1)
@@ -130,8 +130,8 @@ define <4 x float> @test_x86_vbroadcast_ss_ps_128(<4 x float> %a0, <4 x float> %
; CHECK-NEXT: vbroadcastss %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xd0]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf2,0x7d,0x09,0x18,0xc8]
-; CHECK-NEXT: vaddps %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc9]
; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf2,0x7d,0x89,0x18,0xc0]
+; CHECK-NEXT: vaddps %xmm1, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc9]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.128(<4 x float> %a0, <4 x float> zeroinitializer, i8 -1)
@@ -152,9 +152,9 @@ define <4 x float>@test_int_x86_avx512_mask_movsldup_128(<4 x float> %x0, <4 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovsldup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x12,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0,0,2,2]
-; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vmovsldup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x12,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[0,0,2,2]
+; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.movsldup.128(<4 x float> %x0, <4 x float> %x1, i8 %x2)
@@ -175,9 +175,9 @@ define <8 x float>@test_int_x86_avx512_mask_movsldup_256(<8 x float> %x0, <8 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovsldup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x12,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vmovsldup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x12,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.movsldup.256(<8 x float> %x0, <8 x float> %x1, i8 %x2)
@@ -198,9 +198,9 @@ define <4 x float>@test_int_x86_avx512_mask_movshdup_128(<4 x float> %x0, <4 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovshdup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x09,0x16,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[1,1,3,3]
-; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vmovshdup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0x89,0x16,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[1,1,3,3]
+; CHECK-NEXT: vaddps %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xca]
; CHECK-NEXT: vaddps %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.avx512.mask.movshdup.128(<4 x float> %x0, <4 x float> %x1, i8 %x2)
@@ -221,9 +221,9 @@ define <8 x float>@test_int_x86_avx512_mask_movshdup_256(<8 x float> %x0, <8 x f
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovshdup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7e,0x29,0x16,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7]
-; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vmovshdup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7e,0xa9,0x16,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-NEXT: vaddps %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xca]
; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.movshdup.256(<8 x float> %x0, <8 x float> %x1, i8 %x2)
@@ -243,9 +243,9 @@ define <2 x double>@test_int_x86_avx512_mask_movddup_128(<2 x double> %x0, <2 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovddup %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x09,0x12,0xc8]
; CHECK-NEXT: ## xmm1 {%k1} = xmm0[0,0]
-; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vmovddup %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0x89,0x12,0xc0]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[0,0]
+; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xca]
; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.movddup.128(<2 x double> %x0, <2 x double> %x1, i8 %x2)
@@ -266,9 +266,9 @@ define <4 x double>@test_int_x86_avx512_mask_movddup_256(<4 x double> %x0, <4 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vmovddup %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xff,0x29,0x12,0xc8]
; CHECK-NEXT: ## ymm1 {%k1} = ymm0[0,0,2,2]
-; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xca]
; CHECK-NEXT: vmovddup %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xff,0xa9,0x12,0xc0]
; CHECK-NEXT: ## ymm0 {%k1} {z} = ymm0[0,0,2,2]
+; CHECK-NEXT: vaddpd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xca]
; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.movddup.256(<4 x double> %x0, <4 x double> %x1, i8 %x2)
@@ -3209,10 +3209,10 @@ define <2 x double>@test_int_x86_avx512_mask_shuf_pd_128(<2 x double> %x0, <2 x
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xc6,0xd1,0x01]
; CHECK-NEXT: ## xmm2 {%k1} = xmm0[1],xmm1[0]
-; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xd3]
; CHECK-NEXT: vshufpd $1, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xc6,0xc1,0x01]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm0[1],xmm1[0]
-; CHECK-NEXT: vaddpd %xmm2, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc2]
+; CHECK-NEXT: vaddpd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xcb]
+; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.shuf.pd.128(<2 x double> %x0, <2 x double> %x1, i32 1, <2 x double> %x3, i8 %x4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.shuf.pd.128(<2 x double> %x0, <2 x double> %x1, i32 1, <2 x double> %x3, i8 -1)
@@ -3540,9 +3540,9 @@ define <2 x i64>@test_int_x86_avx512_mask_psrl_q_128(<2 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x09,0xd3,0xd1]
-; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xd3]
; CHECK-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0xd3,0xc1]
-; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xd4,0xcb]
+; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrl.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.psrl.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 -1)
@@ -3560,9 +3560,9 @@ define <4 x i64>@test_int_x86_avx512_mask_psrl_q_256(<4 x i64> %x0, <2 x i64> %x
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd3,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0xfd,0x29,0xd3,0xd1]
-; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xd3]
; CHECK-NEXT: vpsrlq %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0xd3,0xc1]
-; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xc0]
+; CHECK-NEXT: vpaddq %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xd4,0xcb]
+; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.psrl.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.psrl.q.256(<4 x i64> %x0, <2 x i64> %x1, <4 x i64> %x2, i8 -1)
@@ -3580,9 +3580,9 @@ define <4 x i32>@test_int_x86_avx512_mask_psrl_d_128(<4 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xd2,0xd1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xd3]
; CHECK-NEXT: vpsrld %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xd2,0xc1]
-; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.psrl.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.psrl.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 -1)
@@ -3600,9 +3600,9 @@ define <8 x i32>@test_int_x86_avx512_mask_psrl_d_256(<8 x i32> %x0, <4 x i32> %x
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd2,0xd9]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xd2,0xd1]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xd3]
; CHECK-NEXT: vpsrld %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xd2,0xc1]
-; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrl.d.256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.psrl.d.256(<8 x i32> %x0, <4 x i32> %x1, <8 x i32> %x2, i8 -1)
@@ -3720,8 +3720,8 @@ define <2 x i64>@test_int_x86_avx512_mask_psrl_qi_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x73,0xd0,0x03]
-; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpsrlq $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x73,0xd0,0x03]
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrl.qi.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -3740,8 +3740,8 @@ define <4 x i64>@test_int_x86_avx512_mask_psrl_qi_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x73,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x73,0xd0,0x03]
-; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpsrlq $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x73,0xd0,0x03]
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.psrl.qi.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -3760,8 +3760,8 @@ define <4 x i32>@test_int_x86_avx512_mask_psrl_di_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xd0,0x03]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpsrld $3, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0x72,0xd0,0x03]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.psrl.di.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -3780,8 +3780,8 @@ define <8 x i32>@test_int_x86_avx512_mask_psrl_di_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0x72,0xd0,0x03]
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xd0,0x03]
-; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpsrld $3, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0x72,0xd0,0x03]
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrl.di.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4642,10 +4642,10 @@ define <4 x i32>@test_int_x86_avx512_mask_valign_d_128(<4 x i32> %x0, <4 x i32>
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: valignd $2, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x03,0xd1,0x02]
; CHECK-NEXT: ## xmm2 {%k1} = xmm1[2,3],xmm0[0,1]
-; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xd3]
; CHECK-NEXT: valignd $2, %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x03,0xc1,0x02]
; CHECK-NEXT: ## xmm0 {%k1} {z} = xmm1[2,3],xmm0[0,1]
-; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xc0]
+; CHECK-NEXT: vpaddd %xmm3, %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.valign.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 2, <4 x i32> %x3, i8 %x4)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.valign.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 2, <4 x i32> %x3, i8 -1)
@@ -4817,9 +4817,9 @@ define <8 x float>@test_int_x86_avx512_mask_insertf32x4_256(<8 x float> %x0, <4
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xd1,0x01]
-; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xd3]
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x18,0xc1,0x01]
-; CHECK-NEXT: vaddps %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc2]
+; CHECK-NEXT: vaddps %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xcb]
+; CHECK-NEXT: vaddps %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x float> @llvm.x86.avx512.mask.insertf32x4.256(<8 x float> %x0, <4 x float> %x1, i32 1, <8 x float> %x3, i8 %x4)
%res1 = call <8 x float> @llvm.x86.avx512.mask.insertf32x4.256(<8 x float> %x0, <4 x float> %x1, i32 1, <8 x float> %x3, i8 -1)
@@ -4837,9 +4837,9 @@ define <8 x i32>@test_int_x86_avx512_mask_inserti32x4_256(<8 x i32> %x0, <4 x i3
; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm3 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd9,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xd1,0x01]
-; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm2 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xd3]
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0xa9,0x38,0xc1,0x01]
-; CHECK-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc2]
+; CHECK-NEXT: vpaddd %ymm3, %ymm2, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xed,0xfe,0xcb]
+; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.inserti32x4.256(<8 x i32> %x0, <4 x i32> %x1, i32 1, <8 x i32> %x3, i8 %x4)
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index 684b0468cf518..1f324d6795649 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -4368,8 +4368,8 @@ define <4 x i32>@test_int_x86_avx512_mask_prol_d_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vprold $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.prol.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -4388,8 +4388,8 @@ define <8 x i32>@test_int_x86_avx512_mask_prol_d_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprold $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprold $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vprold $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.prol.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4408,8 +4408,8 @@ define <2 x i64>@test_int_x86_avx512_mask_prol_q_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vprolq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.prol.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -4428,8 +4428,8 @@ define <4 x i64>@test_int_x86_avx512_mask_prol_q_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc8,0x03]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc8,0x03]
-; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vprolq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc8,0x03]
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.prol.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -4528,8 +4528,8 @@ define <4 x i32>@test_int_x86_avx512_mask_pror_d_128(<4 x i32> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0x89,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vprord $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xca]
; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx512.mask.pror.d.128(<4 x i32> %x0, i32 3, <4 x i32> %x2, i8 %x3)
@@ -4548,8 +4548,8 @@ define <8 x i32>@test_int_x86_avx512_mask_pror_d_256(<8 x i32> %x0, i32 %x1, <8
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprord $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x75,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprord $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0x6d,0xa9,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vprord $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x28,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddd %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xca]
; CHECK-NEXT: vpaddd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xfe,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.pror.d.256(<8 x i32> %x0, i32 3, <8 x i32> %x2, i8 %x3)
@@ -4568,8 +4568,8 @@ define <2 x i64>@test_int_x86_avx512_mask_pror_q_128(<2 x i64> %x0, i32 %x1, <2
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0x89,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vprorq $3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x08,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xca]
; CHECK-NEXT: vpaddq %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.pror.q.128(<2 x i64> %x0, i32 3, <2 x i64> %x2, i8 %x3)
@@ -4588,8 +4588,8 @@ define <4 x i64>@test_int_x86_avx512_mask_pror_q_256(<4 x i64> %x0, i32 %x1, <4
; CHECK-NEXT: kmovw %esi, %k1 ## encoding: [0xc5,0xf8,0x92,0xce]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x72,0xc0,0x03]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm2 {%k1} {z} ## encoding: [0x62,0xf1,0xed,0xa9,0x72,0xc0,0x03]
-; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vprorq $3, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x28,0x72,0xc0,0x03]
+; CHECK-NEXT: vpaddq %ymm2, %ymm1, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xca]
; CHECK-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0xd4,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx512.mask.pror.q.256(<4 x i64> %x0, i32 3, <4 x i64> %x2, i8 %x3)
@@ -4690,9 +4690,9 @@ define <2 x double>@test_int_x86_avx512_mask_fixupimm_pd_128(<2 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $5, %xmm2, %xmm1, %xmm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x09,0x54,0xda,0x05]
; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT: vfixupimmpd $4, %xmm2, %xmm1, %xmm4 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0x89,0x54,0xe2,0x04]
-; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xdc]
; CHECK-NEXT: vfixupimmpd $3, %xmm2, %xmm1, %xmm0 ## encoding: [0x62,0xf3,0xf5,0x08,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %xmm0, %xmm3, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc0]
+; CHECK-NEXT: vaddpd %xmm4, %xmm3, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xcc]
+; CHECK-NEXT: vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.avx512.mask.fixupimm.pd.128(<2 x double> %x0, <2 x double> %x1,<2 x i64> %x2, i32 5, i8 %x4)
%res1 = call <2 x double> @llvm.x86.avx512.mask.fixupimm.pd.128(<2 x double> zeroinitializer, <2 x double> %x1, <2 x i64> %x2, i32 4, i8 %x4)
@@ -4732,9 +4732,9 @@ define <4 x double>@test_int_x86_avx512_mask_fixupimm_pd_256(<4 x double> %x0, <
; CHECK-NEXT: vfixupimmpd $4, %ymm2, %ymm1, %ymm3 {%k1} ## encoding: [0x62,0xf3,0xf5,0x29,0x54,0xda,0x04]
; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
; CHECK-NEXT: vfixupimmpd $5, %ymm2, %ymm1, %ymm4 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xe2,0x05]
-; CHECK-NEXT: vaddpd %ymm4, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdc]
; CHECK-NEXT: vfixupimmpd $3, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc0]
+; CHECK-NEXT: vaddpd %ymm4, %ymm3, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xcc]
+; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.mask.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i32 4, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.mask.fixupimm.pd.256(<4 x double> zeroinitializer, <4 x double> %x1, <4 x i64> %x2 , i32 5, i8 %x4)
@@ -4755,9 +4755,9 @@ define <4 x double>@test_int_x86_avx512_maskz_fixupimm_pd_256(<4 x double> %x0,
; CHECK-NEXT: vpxor %ymm4, %ymm4, %ymm4 ## EVEX TO VEX Compression encoding: [0xc5,0xdd,0xef,0xe4]
; CHECK-NEXT: vmovapd %ymm0, %ymm5 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xe8]
; CHECK-NEXT: vfixupimmpd $4, %ymm4, %ymm1, %ymm5 {%k1} {z} ## encoding: [0x62,0xf3,0xf5,0xa9,0x54,0xec,0x04]
-; CHECK-NEXT: vaddpd %ymm5, %ymm3, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xdd]
; CHECK-NEXT: vfixupimmpd $3, %ymm2, %ymm1, %ymm0 ## encoding: [0x62,0xf3,0xf5,0x28,0x54,0xc2,0x03]
-; CHECK-NEXT: vaddpd %ymm0, %ymm3, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc0]
+; CHECK-NEXT: vaddpd %ymm5, %ymm3, %ymm1 ## EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xcd]
+; CHECK-NEXT: vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call <4 x double> @llvm.x86.avx512.maskz.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> %x2, i32 5, i8 %x4)
%res1 = call <4 x double> @llvm.x86.avx512.maskz.fixupimm.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x i64> zeroinitializer, i32 4, i8 %x4)
diff --git a/test/CodeGen/X86/bitcast-setcc-128.ll b/test/CodeGen/X86/bitcast-setcc-128.ll
new file mode 100644
index 0000000000000..d1508f99fc71e
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -0,0 +1,823 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSE2
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+ssse3 < %s | FileCheck %s --check-prefixes=CHECK,SSE2-SSSE3,SSSE3
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefixes=CHECK,AVX512
+
+define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
+; SSE2-SSSE3-LABEL: v8i16:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v8i16:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $7, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $6, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $5, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $4, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v8i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %x = icmp sgt <8 x i16> %a, %b
+ %res = bitcast <8 x i1> %x to i8
+ ret i8 %res
+}
+
+define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
+; SSE2-SSSE3-LABEL: v4i32:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4i32:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <4 x i32> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
+
+define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
+; SSE2-SSSE3-LABEL: v4f32:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4f32:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vextractps $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractps $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractps $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vextractps $0, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4f32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vcmpltps %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = fcmp ogt <4 x float> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
+
+define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
+; SSE2-SSSE3-LABEL: v16i8:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %cl
+; SSE2-SSSE3-NEXT: andb $1, %cl
+; SSE2-SSSE3-NEXT: movb %cl, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: andb $1, %al
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v16i8:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $14, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $13, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $12, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $11, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $10, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $9, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $8, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $7, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $6, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $5, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $4, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $3, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $2, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $1, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrb $0, %xmm0, %eax
+; AVX1-NEXT: andb $1, %al
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v16i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %x = icmp sgt <16 x i8> %a, %b
+ %res = bitcast <16 x i1> %x to i16
+ ret i16 %res
+}
+
+define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
+; SSE2-SSSE3-LABEL: v2i8:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: psllq $56, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT: psrad $24, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: psllq $56, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT: psrad $24, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movq %xmm1, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movq %xmm0, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v2i8:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpsllq $56, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpsllq $56, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v2i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllq $56, %xmm1, %xmm1
+; AVX512-NEXT: vpsraq $56, %xmm1, %xmm1
+; AVX512-NEXT: vpsllq $56, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $56, %xmm0, %xmm0
+; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <2 x i8> %a, %b
+ %res = bitcast <2 x i1> %x to i2
+ ret i2 %res
+}
+
+define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
+; SSE2-SSSE3-LABEL: v2i16:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: psllq $48, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT: psrad $16, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-SSSE3-NEXT: psllq $48, %xmm1
+; SSE2-SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: psrad $31, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
+; SSE2-SSSE3-NEXT: psrad $16, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movq %xmm1, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movq %xmm0, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v2i16:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpsllq $48, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpsllq $48, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
+; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v2i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllq $48, %xmm1, %xmm1
+; AVX512-NEXT: vpsraq $48, %xmm1, %xmm1
+; AVX512-NEXT: vpsllq $48, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $48, %xmm0, %xmm0
+; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <2 x i16> %a, %b
+ %res = bitcast <2 x i1> %x to i2
+ ret i2 %res
+}
+
+define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
+; SSE2-SSSE3-LABEL: v2i32:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: psllq $32, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT: psrad $31, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE2-SSSE3-NEXT: psllq $32, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT: psrad $31, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
+; SSE2-SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pxor %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: movdqa %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movq %xmm1, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movq %xmm0, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v2i32:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $31, %xmm0, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v2i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1
+; AVX512-NEXT: vpsraq $32, %xmm1, %xmm1
+; AVX512-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX512-NEXT: vpsraq $32, %xmm0, %xmm0
+; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <2 x i32> %a, %b
+ %res = bitcast <2 x i1> %x to i2
+ ret i2 %res
+}
+
+define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
+; SSE2-SSSE3-LABEL: v2i64:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,0,2147483648,0]
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm1
+; SSE2-SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSE2-SSSE3-NEXT: movdqa %xmm0, %xmm2
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-SSSE3-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movq %xmm1, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movq %xmm0, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v2i64:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v2i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <2 x i64> %a, %b
+ %res = bitcast <2 x i1> %x to i2
+ ret i2 %res
+}
+
+define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
+; SSE2-SSSE3-LABEL: v2f64:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
+; SSE2-SSSE3-NEXT: movq %xmm1, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-SSSE3-NEXT: movq %xmm0, %rax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v2f64:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpextrq $1, %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v2f64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vcmpltpd %xmm0, %xmm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = fcmp ogt <2 x double> %a, %b
+ %res = bitcast <2 x i1> %x to i2
+ ret i2 %res
+}
+
+define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
+; SSE2-SSSE3-LABEL: v4i8:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pslld $24, %xmm1
+; SSE2-SSSE3-NEXT: psrad $24, %xmm1
+; SSE2-SSSE3-NEXT: pslld $24, %xmm0
+; SSE2-SSSE3-NEXT: psrad $24, %xmm0
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4i8:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpslld $24, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1
+; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpslld $24, %xmm1, %xmm1
+; AVX512-NEXT: vpsrad $24, %xmm1, %xmm1
+; AVX512-NEXT: vpslld $24, %xmm0, %xmm0
+; AVX512-NEXT: vpsrad $24, %xmm0, %xmm0
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <4 x i8> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
+
+define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
+; SSE2-SSSE3-LABEL: v4i16:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: pslld $16, %xmm1
+; SSE2-SSSE3-NEXT: psrad $16, %xmm1
+; SSE2-SSSE3-NEXT: pslld $16, %xmm0
+; SSE2-SSSE3-NEXT: psrad $16, %xmm0
+; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-SSSE3-NEXT: movd %xmm1, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v4i16:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpslld $16, %xmm1, %xmm1
+; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
+; AVX1-NEXT: vpslld $16, %xmm0, %xmm0
+; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrd $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrd $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpslld $16, %xmm1, %xmm1
+; AVX512-NEXT: vpsrad $16, %xmm1, %xmm1
+; AVX512-NEXT: vpslld $16, %xmm0, %xmm0
+; AVX512-NEXT: vpsrad $16, %xmm0, %xmm0
+; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: retq
+ %x = icmp sgt <4 x i16> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
+
+define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
+; SSE2-SSSE3-LABEL: v8i8:
+; SSE2-SSSE3: ## BB#0:
+; SSE2-SSSE3-NEXT: psllw $8, %xmm1
+; SSE2-SSSE3-NEXT: psraw $8, %xmm1
+; SSE2-SSSE3-NEXT: psllw $8, %xmm0
+; SSE2-SSSE3-NEXT: psraw $8, %xmm0
+; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
+; SSE2-SSSE3-NEXT: pextrw $7, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $6, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $5, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $4, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $3, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $2, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movd %xmm0, %eax
+; SSE2-SSSE3-NEXT: andl $1, %eax
+; SSE2-SSSE3-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; SSE2-SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; SSE2-SSSE3-NEXT: retq
+;
+; AVX1-LABEL: v8i8:
+; AVX1: ## BB#0:
+; AVX1-NEXT: vpsllw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpsllw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $7, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $6, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $5, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $4, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $3, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $2, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vpextrw $1, %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: andl $1, %eax
+; AVX1-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX1-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v8i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpsllw $8, %xmm1, %xmm1
+; AVX512-NEXT: vpsraw $8, %xmm1, %xmm1
+; AVX512-NEXT: vpsllw $8, %xmm0, %xmm0
+; AVX512-NEXT: vpsraw $8, %xmm0, %xmm0
+; AVX512-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: retq
+ %x = icmp sgt <8 x i8> %a, %b
+ %res = bitcast <8 x i1> %x to i8
+ ret i8 %res
+}
diff --git a/test/CodeGen/X86/bitcast-setcc-256.ll b/test/CodeGen/X86/bitcast-setcc-256.ll
new file mode 100644
index 0000000000000..51c6ad7c7f9ef
--- /dev/null
+++ b/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -0,0 +1,363 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX2
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
+
+define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
+; AVX2-LABEL: v16i16:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movzwl -{{[0-9]+}}(%rsp), %eax
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v16i16:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x = icmp sgt <16 x i16> %a, %b
+ %res = bitcast <16 x i1> %x to i16
+ ret i16 %res
+}
+
+define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
+; AVX2-LABEL: v8i32:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $7, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $6, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $5, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $4, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $3, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v8i32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x = icmp sgt <8 x i32> %a, %b
+ %res = bitcast <8 x i1> %x to i8
+ ret i8 %res
+}
+
+define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
+; AVX2-LABEL: v8f32:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrw $7, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $6, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $5, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $4, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $3, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $2, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrw $1, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v8f32:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x = fcmp ogt <8 x float> %a, %b
+ %res = bitcast <8 x i1> %x to i8
+ ret i8 %res
+}
+
+define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) {
+; AVX2-LABEL: v32i8:
+; AVX2: ## BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: Lcfi0:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: Lcfi1:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: Lcfi2:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $32, %rsp
+; AVX2-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpextrb $15, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm1, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $15, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $14, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $13, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $12, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $11, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $10, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $9, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $8, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $7, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $6, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $5, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $4, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $3, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $2, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $1, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: vpextrb $0, %xmm0, %eax
+; AVX2-NEXT: andb $1, %al
+; AVX2-NEXT: movb %al, (%rsp)
+; AVX2-NEXT: movl (%rsp), %eax
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v32i8:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtb %ymm1, %ymm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x = icmp sgt <32 x i8> %a, %b
+ %res = bitcast <32 x i1> %x to i32
+ ret i32 %res
+}
+
+define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
+; AVX2-LABEL: v4i64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $3, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrd $2, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrd $1, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v4i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x = icmp sgt <4 x i64> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
+
+define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
+; AVX2-LABEL: v4f64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpextrd $3, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrd $2, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vpextrd $1, %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: vmovd %xmm0, %eax
+; AVX2-NEXT: andl $1, %eax
+; AVX2-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX2-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: v4f64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %k0
+; AVX512-NEXT: kmovd %k0, %eax
+; AVX512-NEXT: movb %al, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: movb -{{[0-9]+}}(%rsp), %al
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %x = fcmp ogt <4 x double> %a, %b
+ %res = bitcast <4 x i1> %x to i4
+ ret i4 %res
+}
diff --git a/test/CodeGen/X86/bswap_tree2.ll b/test/CodeGen/X86/bswap_tree2.ll
index 1340b7662a7ad..a9c74df9d0d91 100644
--- a/test/CodeGen/X86/bswap_tree2.ll
+++ b/test/CodeGen/X86/bswap_tree2.ll
@@ -9,32 +9,31 @@
define i32 @test1(i32 %x) nounwind {
; CHECK-LABEL: test1:
; CHECK: # BB#0:
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andl $16711680, %ecx # imm = 0xFF0000
-; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: orl $-16777216, %edx # imm = 0xFF000000
-; CHECK-NEXT: shll $8, %ecx
-; CHECK-NEXT: shrl $8, %edx
-; CHECK-NEXT: orl %ecx, %edx
-; CHECK-NEXT: bswapl %eax
-; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl %ecx, %edx
+; CHECK-NEXT: andl $16711680, %edx # imm = 0xFF0000
+; CHECK-NEXT: movl %ecx, %eax
+; CHECK-NEXT: orl $-16777216, %eax # imm = 0xFF000000
+; CHECK-NEXT: shll $8, %edx
+; CHECK-NEXT: shrl $8, %eax
+; CHECK-NEXT: bswapl %ecx
+; CHECK-NEXT: shrl $16, %ecx
; CHECK-NEXT: orl %edx, %eax
+; CHECK-NEXT: orl %ecx, %eax
; CHECK-NEXT: retl
;
; CHECK64-LABEL: test1:
; CHECK64: # BB#0:
-; CHECK64-NEXT: movl %edi, %eax
-; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000
; CHECK64-NEXT: movl %edi, %ecx
-; CHECK64-NEXT: orl $-16777216, %ecx # imm = 0xFF000000
-; CHECK64-NEXT: shll $8, %eax
-; CHECK64-NEXT: shrl $8, %ecx
-; CHECK64-NEXT: orl %eax, %ecx
+; CHECK64-NEXT: andl $16711680, %ecx # imm = 0xFF0000
+; CHECK64-NEXT: movl %edi, %eax
+; CHECK64-NEXT: orl $-16777216, %eax # imm = 0xFF000000
+; CHECK64-NEXT: shll $8, %ecx
+; CHECK64-NEXT: shrl $8, %eax
; CHECK64-NEXT: bswapl %edi
; CHECK64-NEXT: shrl $16, %edi
-; CHECK64-NEXT: orl %ecx, %edi
-; CHECK64-NEXT: movl %edi, %eax
+; CHECK64-NEXT: orl %ecx, %eax
+; CHECK64-NEXT: orl %edi, %eax
; CHECK64-NEXT: retq
%byte0 = and i32 %x, 255 ; 0x000000ff
%byte1 = and i32 %x, 65280 ; 0x0000ff00
diff --git a/test/CodeGen/X86/constant-combines.ll b/test/CodeGen/X86/constant-combines.ll
index 5ea736e92c784..4f55814958f44 100644
--- a/test/CodeGen/X86/constant-combines.ll
+++ b/test/CodeGen/X86/constant-combines.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
@@ -11,13 +12,20 @@ define void @PR22524({ float, float }* %arg) {
; it folded it to a zero too late to legalize the zero store operation. If this
; ever starts forming a zero store instead of movss, the test case has stopped
; being useful.
-;
+;
; CHECK-LABEL: PR22524:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl $0, 4(%rdi)
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: movd %eax, %xmm0
+; CHECK-NEXT: xorps %xmm1, %xmm1
+; CHECK-NEXT: mulss %xmm0, %xmm1
+; CHECK-NEXT: movl $0, (%rdi)
+; CHECK-NEXT: movss %xmm1, 4(%rdi)
+; CHECK-NEXT: retq
entry:
%0 = getelementptr inbounds { float, float }, { float, float }* %arg, i32 0, i32 1
store float 0.000000e+00, float* %0, align 4
-; CHECK: movl $0, 4(%rdi)
-
%1 = getelementptr inbounds { float, float }, { float, float }* %arg, i64 0, i32 0
%2 = bitcast float* %1 to i64*
%3 = load i64, i64* %2, align 8
@@ -28,8 +36,6 @@ entry:
%8 = fmul float %7, 0.000000e+00
%9 = bitcast float* %1 to i32*
store i32 %6, i32* %9, align 4
-; CHECK: movl $0, (%rdi)
store float %8, float* %0, align 4
-; CHECK: movss %{{.*}}, 4(%rdi)
ret void
}
diff --git a/test/CodeGen/X86/fast-isel-load-i1.ll b/test/CodeGen/X86/fast-isel-load-i1.ll
index 2f3c6c4b84b93..f515d38cbb950 100644
--- a/test/CodeGen/X86/fast-isel-load-i1.ll
+++ b/test/CodeGen/X86/fast-isel-load-i1.ll
@@ -4,9 +4,7 @@
define i1 @test_i1(i1* %b) {
; CHECK-LABEL: test_i1:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: movzbl (%rdi), %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: testb $1, %al
+; CHECK-NEXT: testb $1, (%rdi)
; CHECK-NEXT: je .LBB0_2
; CHECK-NEXT: # BB#1: # %in
; CHECK-NEXT: xorl %eax, %eax
diff --git a/test/CodeGen/X86/fma-fneg-combine.ll b/test/CodeGen/X86/fma-fneg-combine.ll
index bb332f7282a8e..d1d69c68af7b1 100644
--- a/test/CodeGen/X86/fma-fneg-combine.ll
+++ b/test/CodeGen/X86/fma-fneg-combine.ll
@@ -141,7 +141,6 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
; SKX-LABEL: test11:
; SKX: # BB#0: # %entry
; SKX-NEXT: vxorps {{.*}}(%rip){1to4}, %xmm2, %xmm0
-; SKX-NEXT: andl $1, %edi
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -150,7 +149,6 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
; KNL: # BB#0: # %entry
; KNL-NEXT: vbroadcastss {{.*}}(%rip), %xmm0
; KNL-NEXT: vxorps %xmm0, %xmm2, %xmm0
-; KNL-NEXT: andl $1, %edi
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmadd231ss %xmm1, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
@@ -186,7 +184,6 @@ define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i
; SKX-LABEL: test13:
; SKX: # BB#0: # %entry
; SKX-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
-; SKX-NEXT: andl $1, %edi
; SKX-NEXT: kmovd %edi, %k1
; SKX-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
; SKX-NEXT: retq
@@ -194,10 +191,10 @@ define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i
; KNL-LABEL: test13:
; KNL: # BB#0: # %entry
; KNL-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0
-; KNL-NEXT: andl $1, %edi
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: vfmadd213sd %xmm2, %xmm1, %xmm0 {%k1}
; KNL-NEXT: retq
+
entry:
%sub.i = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %a
%0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
diff --git a/test/CodeGen/X86/fmsubadd-combine.ll b/test/CodeGen/X86/fmsubadd-combine.ll
new file mode 100644
index 0000000000000..bd8888966cf2c
--- /dev/null
+++ b/test/CodeGen/X86/fmsubadd-combine.ll
@@ -0,0 +1,193 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_256 %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma,+avx512f | FileCheck -check-prefix=FMA3 -check-prefix=FMA3_512 %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+fma4 | FileCheck -check-prefix=FMA4 %s
+
+; This test checks the fusing of MUL + SUB/ADD to FMSUBADD.
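+; The multiply result is blended so that even lanes take the fadd and odd lanes take the fsub, which is the sub/add interleaving FMSUBADD implements.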
+
+define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
+; FMA3_256-LABEL: mul_subadd_pd128:
+; FMA3_256: # BB#0: # %entry
+; FMA3_256-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; FMA3_256-NEXT: vsubpd %xmm2, %xmm0, %xmm1
+; FMA3_256-NEXT: vaddpd %xmm2, %xmm0, %xmm0
+; FMA3_256-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: mul_subadd_pd128:
+; FMA3_512: # BB#0: # %entry
+; FMA3_512-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; FMA3_512-NEXT: vsubpd %xmm2, %xmm0, %xmm1
+; FMA3_512-NEXT: vaddpd %xmm2, %xmm0, %xmm0
+; FMA3_512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: mul_subadd_pd128:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vsubpd %xmm2, %xmm0, %xmm1
+; FMA4-NEXT: vaddpd %xmm2, %xmm0, %xmm0
+; FMA4-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <2 x double> %A, %B
+ %Sub = fsub <2 x double> %AB, %C
+ %Add = fadd <2 x double> %AB, %C
+ %subadd = shufflevector <2 x double> %Add, <2 x double> %Sub, <2 x i32> <i32 0, i32 3>
+ ret <2 x double> %subadd
+}
+
+define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
+; FMA3-LABEL: mul_subadd_ps128:
+; FMA3: # BB#0: # %entry
+; FMA3-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; FMA3-NEXT: vsubps %xmm2, %xmm0, %xmm1
+; FMA3-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; FMA3-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: mul_subadd_ps128:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; FMA4-NEXT: vsubps %xmm2, %xmm0, %xmm1
+; FMA4-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; FMA4-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <4 x float> %A, %B
+ %Sub = fsub <4 x float> %AB, %C
+ %Add = fadd <4 x float> %AB, %C
+ %subadd = shufflevector <4 x float> %Add, <4 x float> %Sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x float> %subadd
+}
+
+define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
+; FMA3-LABEL: mul_subadd_pd256:
+; FMA3: # BB#0: # %entry
+; FMA3-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; FMA3-NEXT: vsubpd %ymm2, %ymm0, %ymm1
+; FMA3-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; FMA3-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: mul_subadd_pd256:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; FMA4-NEXT: vsubpd %ymm2, %ymm0, %ymm1
+; FMA4-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <4 x double> %A, %B
+ %Sub = fsub <4 x double> %AB, %C
+ %Add = fadd <4 x double> %AB, %C
+ %subadd = shufflevector <4 x double> %Add, <4 x double> %Sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x double> %subadd
+}
+
+define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
+; FMA3-LABEL: mul_subadd_ps256:
+; FMA3: # BB#0: # %entry
+; FMA3-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; FMA3-NEXT: vsubps %ymm2, %ymm0, %ymm1
+; FMA3-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; FMA3-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; FMA3-NEXT: retq
+;
+; FMA4-LABEL: mul_subadd_ps256:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; FMA4-NEXT: vsubps %ymm2, %ymm0, %ymm1
+; FMA4-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <8 x float> %A, %B
+ %Sub = fsub <8 x float> %AB, %C
+ %Add = fadd <8 x float> %AB, %C
+ %subadd = shufflevector <8 x float> %Add, <8 x float> %Sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x float> %subadd
+}
+
+define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
+; FMA3_256-LABEL: mul_subadd_pd512:
+; FMA3_256: # BB#0: # %entry
+; FMA3_256-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; FMA3_256-NEXT: vmulpd %ymm3, %ymm1, %ymm1
+; FMA3_256-NEXT: vsubpd %ymm5, %ymm1, %ymm2
+; FMA3_256-NEXT: vsubpd %ymm4, %ymm0, %ymm3
+; FMA3_256-NEXT: vaddpd %ymm5, %ymm1, %ymm1
+; FMA3_256-NEXT: vaddpd %ymm4, %ymm0, %ymm0
+; FMA3_256-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
+; FMA3_256-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: mul_subadd_pd512:
+; FMA3_512: # BB#0: # %entry
+; FMA3_512-NEXT: vmulpd %zmm1, %zmm0, %zmm0
+; FMA3_512-NEXT: vsubpd %zmm2, %zmm0, %zmm1
+; FMA3_512-NEXT: vaddpd %zmm2, %zmm0, %zmm0
+; FMA3_512-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[1],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[7]
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: mul_subadd_pd512:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vsubpd %ymm5, %ymm1, %ymm2
+; FMA4-NEXT: vsubpd %ymm4, %ymm0, %ymm3
+; FMA4-NEXT: vaddpd %ymm5, %ymm1, %ymm1
+; FMA4-NEXT: vaddpd %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
+; FMA4-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <8 x double> %A, %B
+ %Sub = fsub <8 x double> %AB, %C
+ %Add = fadd <8 x double> %AB, %C
+ %subadd = shufflevector <8 x double> %Add, <8 x double> %Sub, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+ ret <8 x double> %subadd
+}
+
+define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
+; FMA3_256-LABEL: mul_subadd_ps512:
+; FMA3_256: # BB#0: # %entry
+; FMA3_256-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA3_256-NEXT: vmulps %ymm3, %ymm1, %ymm1
+; FMA3_256-NEXT: vsubps %ymm5, %ymm1, %ymm2
+; FMA3_256-NEXT: vsubps %ymm4, %ymm0, %ymm3
+; FMA3_256-NEXT: vaddps %ymm5, %ymm1, %ymm1
+; FMA3_256-NEXT: vaddps %ymm4, %ymm0, %ymm0
+; FMA3_256-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
+; FMA3_256-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; FMA3_256-NEXT: retq
+;
+; FMA3_512-LABEL: mul_subadd_ps512:
+; FMA3_512: # BB#0: # %entry
+; FMA3_512-NEXT: vmulps %zmm1, %zmm0, %zmm1
+; FMA3_512-NEXT: vaddps %zmm2, %zmm1, %zmm0
+; FMA3_512-NEXT: movw $-21846, %ax # imm = 0xAAAA
+; FMA3_512-NEXT: kmovw %eax, %k1
+; FMA3_512-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1}
+; FMA3_512-NEXT: retq
+;
+; FMA4-LABEL: mul_subadd_ps512:
+; FMA4: # BB#0: # %entry
+; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
+; FMA4-NEXT: vsubps %ymm5, %ymm1, %ymm2
+; FMA4-NEXT: vsubps %ymm4, %ymm0, %ymm3
+; FMA4-NEXT: vaddps %ymm5, %ymm1, %ymm1
+; FMA4-NEXT: vaddps %ymm4, %ymm0, %ymm0
+; FMA4-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
+; FMA4-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; FMA4-NEXT: retq
+entry:
+ %AB = fmul <16 x float> %A, %B
+ %Sub = fsub <16 x float> %AB, %C
+ %Add = fadd <16 x float> %AB, %C
+ %subadd = shufflevector <16 x float> %Add, <16 x float> %Sub, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+ ret <16 x float> %subadd
+}
+
+attributes #0 = { nounwind "unsafe-fp-math"="true" }
diff --git a/test/CodeGen/X86/fold-tied-op.ll b/test/CodeGen/X86/fold-tied-op.ll
index eb06eb75a4d70..d68236e9d250e 100644
--- a/test/CodeGen/X86/fold-tied-op.ll
+++ b/test/CodeGen/X86/fold-tied-op.ll
@@ -6,10 +6,9 @@ target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
target triple = "i386--netbsd"
; CHECK-LABEL: fn1
-; CHECK: orl {{.*#+}} 4-byte Folded Reload
-; CHECK: addl {{.*#+}} 4-byte Folded Reload
-; CHECK: xorl {{.*#+}} 4-byte Folded Reload
-; CHECK: xorl {{.*#+}} 4-byte Folded Reload
+; CHECK: addl {{.*#+}} 4-byte Folded Reload
+; CHECK: imull {{.*#+}} 4-byte Folded Reload
+; CHECK: orl {{.*#+}} 4-byte Folded Reload
; CHECK: retl
%struct.XXH_state64_t = type { i32, i32, i64, i64, i64 }
diff --git a/test/CodeGen/X86/fp128-i128.ll b/test/CodeGen/X86/fp128-i128.ll
index 6c6bc8bdc1d13..98082ec611d49 100644
--- a/test/CodeGen/X86/fp128-i128.ll
+++ b/test/CodeGen/X86/fp128-i128.ll
@@ -50,8 +50,8 @@ define void @TestUnionLD1(fp128 %s, i64 %n) #0 {
; CHECK-NEXT: andq %rdi, %rcx
; CHECK-NEXT: movabsq $-281474976710656, %rdx # imm = 0xFFFF000000000000
; CHECK-NEXT: andq -{{[0-9]+}}(%rsp), %rdx
-; CHECK-NEXT: orq %rcx, %rdx
; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: orq %rcx, %rdx
; CHECK-NEXT: movq %rdx, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm0
; CHECK-NEXT: jmp foo # TAILCALL
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
index b5507523a75a5..4596b83f7bc2f 100644
--- a/test/CodeGen/X86/haddsub-2.ll
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -933,14 +933,14 @@ define <4 x float> @not_a_hsub_2(<4 x float> %A, <4 x float> %B) {
; AVX-NEXT: vsubss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vsubss %xmm3, %xmm0, %xmm0
+; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; AVX-NEXT: vsubss %xmm4, %xmm3, %xmm3
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[2,3]
-; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
-; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
-; AVX-NEXT: vsubss %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; AVX-NEXT: vsubss %xmm3, %xmm1, %xmm1
+; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX-NEXT: vsubss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
; AVX-NEXT: retq
%vecext = extractelement <4 x float> %A, i32 2
%vecext1 = extractelement <4 x float> %A, i32 3
diff --git a/test/CodeGen/X86/leaFixup32.mir b/test/CodeGen/X86/leaFixup32.mir
new file mode 100644
index 0000000000000..e3986e47df4da
--- /dev/null
+++ b/test/CodeGen/X86/leaFixup32.mir
@@ -0,0 +1,509 @@
+# RUN: llc -run-pass x86-fixup-LEAs -mtriple=i386 -verify-machineinstrs -mcpu=corei7-avx -o - %s | FileCheck %s
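+# The x86-fixup-LEAs pass exercised here rewrites 3-operand LEA32r instructions into the cheaper ADD/LEA sequences described in the per-test comments below.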
+--- |
+ ; ModuleID = 'test/CodeGen/X86/fixup-lea.ll'
+ source_filename = "test/CodeGen/X86/fixup-lea.ll"
+ target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
+ target triple = "i386"
+  ;generated using: llc -stop-after x86-pad-short-functions fixup-lea.ll > leaFixup32.mir
+
+ ;test2add_32: 3 operands LEA32r that can be replaced with 2 add instructions
+ ; where ADD32ri8 is chosen
+ define i32 @test2add_32() {
+ ret i32 0
+ }
+
+ ;test2add_ebp_32: 3 operands LEA32r that can be replaced with 2 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test2add_ebp_32() {
+ ret i32 0
+ }
+
+ ;test1add_ebp_32: 2 operands LEA32r where base register is ebp and can be replaced
+ ; with an add instruction
+ define i32 @test1add_ebp_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ define i32 @testleaadd_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_ebp_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ ; where the base is ebp register
+ define i32 @testleaadd_ebp_32() {
+ ret i32 0
+ }
+
+  ;test1lea_ebp_32: 2 operands LEA32r where base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_ebp_32() {
+ ret i32 0
+ }
+
+ ;test2addi32_32: 3 operands LEA32r that can be replaced with 2 add instructions where ADD32ri32
+ ; is chosen
+ define i32 @test2addi32_32() {
+ ret i32 0
+ }
+
+ ;test1mov1add_ebp_32: 2 operands LEA32r that can be replaced with 1 add 1 mov instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test1mov1add_ebp_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_ebp_index_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is offset
+ define i32 @testleaadd_ebp_index_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_ebp_index2_32: 3 operands LEA32r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is scale
+ define i32 @testleaadd_ebp_index2_32() {
+ ret i32 0
+ }
+
+  ;test_skip_opt_32: 3 operands LEA32r that cannot be replaced with 2 instructions
+ define i32 @test_skip_opt_32() {
+ ret i32 0
+ }
+
+  ;test_skip_eflags_32: LEA32r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_32() {
+ ret i32 0
+ }
+
+...
+---
+name: test2add_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %eax = ADD32rr %eax, killed %ebp
+ ; CHECK: %eax = ADD32ri8 %eax, -5
+
+ %eax = LEA32r killed %eax, 1, killed %ebp, -5, _
+ RETQ %eax
+
+...
+---
+name: test2add_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebp = ADD32rr %ebp, killed %eax
+ ; CHECK: %ebp = ADD32ri8 %ebp, -5
+
+ %ebp = LEA32r killed %ebp, 1, killed %eax, -5, _
+ RETQ %ebp
+
+...
+---
+name: test1add_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebp = ADD32rr %ebp, killed %eax
+
+ %ebp = LEA32r killed %ebp, 1, killed %eax, 0, _
+ RETQ %ebp
+
+...
+---
+name: testleaadd_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+ - { reg: '%ebx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %esi
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA32r killed %eax, 1, killed %ebp, -5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+ - { reg: '%ebx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA32r killed %ebp, 1, killed %eax, -5, _
+ RETQ %ebx
+
+...
+---
+name: test1lea_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+ - { reg: '%ebx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+
+ %ebx = LEA32r killed %ebp, 1, killed %eax, 0, _
+ RETQ %ebx
+
+...
+---
+name: test2addi32_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp
+ ; CHECK: %eax = ADD32rr %eax, killed %ebp
+ ; CHECK: %eax = ADD32ri %eax, 129
+
+ %eax = LEA32r killed %eax, 1, killed %ebp, 129, _
+ RETQ %eax
+
+...
+---
+name: test1mov1add_ebp_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%eax' }
+ - { reg: '%ebx' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = MOV32rr %ebp
+ ; CHECK: %ebx = ADD32rr %ebx, %ebp
+
+ %ebx = LEA32r %ebp, 1, %ebp, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_ebp_index_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebx' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA32r _, 1, %ebp, 5, _
+ ; CHECK: %ebx = ADD32rr %ebx, %ebp
+
+ %ebx = LEA32r %ebp, 1, %ebp, 5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_ebp_index2_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebx' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA32r _, 4, %ebp, 5, _
+ ; CHECK: %ebx = ADD32rr %ebx, %ebp
+
+ %ebx = LEA32r %ebp, 4, %ebp, 5, _
+ RETQ %ebx
+
+...
+---
+name: test_skip_opt_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebx' }
+ - { reg: '%ebp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+
+ %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_eflags_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%ebp' }
+ - { reg: '%eax' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
+ ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, _
+ ; CHECK: %ebp = ADD32ri8 %ebp, 5
+
+ CMP32rr %eax, killed %ebx, implicit-def %eflags
+ %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
+ JE_1 %bb.1, implicit %eflags
+ RETQ %ebx
+ bb.1:
+ liveins: %eax, %ebp, %ebx
+ %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, _
+ RETQ %ebp
+
+...
+
+
+
diff --git a/test/CodeGen/X86/leaFixup64.mir b/test/CodeGen/X86/leaFixup64.mir
new file mode 100644
index 0000000000000..b35dee181a475
--- /dev/null
+++ b/test/CodeGen/X86/leaFixup64.mir
@@ -0,0 +1,1041 @@
+# RUN: llc -run-pass x86-fixup-LEAs -mtriple=x86_64-gnu-unknown -verify-machineinstrs -mcpu=corei7-avx -o - %s | FileCheck %s
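+# 64-bit counterpart of leaFixup32.mir: the same x86-fixup-LEAs pass applied to LEA64r and LEA64_32r instructions.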
+--- |
+ ; ModuleID = 'lea-2.ll'
+ source_filename = "lea-2.ll"
+ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+  ;generated using: llc -stop-after x86-pad-short-functions lea-2.ll > leaFixup64.mir
+
+ ;testleaadd_64_32_1: 3 operands LEA64_32r cannot be replaced with 2 add instructions
+ ; but can be replaced with 1 lea + 1 add
+ define i32 @testleaadd_64_32_1() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64_32_1: 3 operands LEA64_32r cannot be replaced with 2 add instructions
+ ; where the base is rbp/r13/ebp register but it can be replaced with 1 lea + 1 add
+ define i32 @testleaadd_rbp_64_32_1() {
+ ret i32 0
+ }
+
+  ;test1lea_rbp_64_32_1: 2 operands LEA64_32r where base register is rbp/r13/ebp and cannot
+ ; be replaced with an add instruction but can be replaced with 1 lea instruction
+ define i32 @test1lea_rbp_64_32_1() {
+ ret i32 0
+ }
+
+ ;test2add_64: 3 operands LEA64r that can be replaced with 2 add instructions
+ define i32 @test2add_64() {
+ ret i32 0
+ }
+
+ ;test2add_rbp_64: 3 operands LEA64r that can be replaced with 2 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test2add_rbp_64() {
+ ret i32 0
+ }
+
+ ;test1add_rbp_64: 2 operands LEA64r where base register is rbp/r13/ebp and can be replaced
+ ; with an add instruction
+ define i32 @test1add_rbp_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_64_32: 3 operands LEA64_32r that can be replaced with 1 lea 1 add instructions
+ define i32 @testleaadd_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64_32: 3 operands LEA64_32r that can be replaced with 1 lea 1 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @testleaadd_rbp_64_32() {
+ ret i32 0
+ }
+
+ ;test1lea_rbp_64_32: 2 operands LEA64_32r where base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_rbp_64_32() {
+ ret i32 0
+ }
+
+ ;testleaadd_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ define i32 @testleaadd_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @testleaadd_rbp_64() {
+ ret i32 0
+ }
+
+  ;test1lea_rbp_64: 2 operands LEA64r where base register is rbp/r13/ebp and can be replaced
+ ; with a lea instruction
+ define i32 @test1lea_rbp_64() {
+ ret i32 0
+ }
+
+ ;test8: dst = base & scale!=1, can't optimize
+ define i32 @test8() {
+ ret i32 0
+ }
+
+ ;testleaaddi32_64_32: 3 operands LEA64_32r that can be replaced with 1 lea + 1 add instructions where
+ ; ADD64ri32 is chosen
+ define i32 @testleaaddi32_64_32() {
+ ret i32 0
+ }
+
+ ;test1mov1add_rbp_64_32: 2 operands LEA64_32r cannot be replaced with 1 add 1 mov instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test1mov1add_rbp_64_32() {
+ ret i32 0
+ }
+
+  ;testleaadd_rbp_index_64_32: 3 operands LEA64_32r that cannot be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is offset
+ define i32 @testleaadd_rbp_index_64_32() {
+ ret i32 0
+ }
+
+  ;testleaadd_rbp_index2_64_32: 3 operands LEA64_32r that cannot be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is scale
+ define i32 @testleaadd_rbp_index2_64_32() {
+ ret i32 0
+ }
+
+ ;test2addi32_64: 3 operands LEA64r that can be replaced with 2 add instructions where ADD64ri32
+ ; is chosen
+ define i32 @test2addi32_64() {
+ ret i32 0
+ }
+
+ ;test1mov1add_rbp_64: 2 operands LEA64r that can be replaced with 1 add 1 mov instructions
+ ; where the base is rbp/r13/ebp register
+ define i32 @test1mov1add_rbp_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is offset
+ define i32 @testleaadd_rbp_index_64() {
+ ret i32 0
+ }
+
+ ;testleaadd_rbp_index2_64: 3 operands LEA64r that can be replaced with 1 lea 1 add instructions
+ ; where the base and the index are ebp register and there is scale
+ define i32 @testleaadd_rbp_index2_64() {
+ ret i32 0
+ }
+
+  ;test_skip_opt_64: 3 operands LEA64r that cannot be replaced with 2 instructions
+ define i32 @test_skip_opt_64() {
+ ret i32 0
+ }
+
+  ;test_skip_eflags_64: LEA64r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_64() {
+ ret i32 0
+ }
+
+  ;test_skip_opt_64_32: 3 operands LEA64_32r that cannot be replaced with 2 instructions
+ define i32 @test_skip_opt_64_32() {
+ ret i32 0
+ }
+
+  ;test_skip_eflags_64_32: LEA64_32r that cannot be replaced since it's not safe to clobber eflags
+ define i32 @test_skip_eflags_64_32() {
+ ret i32 0
+ }
+
+
+...
+---
+name: testleaadd_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %eax = ADD32ri8 %eax, -5
+
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ RETQ %eax
+
+...
+---
+name: testleaadd_rbp_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %ebp = ADD32ri8 %ebp, -5
+
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebp
+
+...
+---
+name: test1lea_rbp_64_32_1
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
+
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebp
+
+...
+---
+name: test2add_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rax = ADD64rr %rax, killed %rbp
+ ; CHECK: %rax = ADD64ri8 %rax, -5
+
+ %rax = LEA64r killed %rax, 1, killed %rbp, -5, _
+ RETQ %eax
+
+...
+---
+name: test2add_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbp = ADD64rr %rbp, killed %rax
+ ; CHECK: %rbp = ADD64ri8 %rbp, -5
+
+ %rbp = LEA64r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebp
+
+...
+---
+name: test1add_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbp = ADD64rr %rbp, killed %rax
+
+ %rbp = LEA64r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebp
+
+...
+---
+name: testleaadd_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = ADD32ri8 %ebx, -5
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebx
+
+...
+---
+name: test1lea_rbp_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = ADD64ri8 %rbx, -5
+
+ %rbx = LEA64r killed %rax, 1, killed %rbp, -5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = ADD64ri8 %rbx, -5
+
+ %rbx = LEA64r killed %rbp, 1, killed %rax, -5, _
+ RETQ %ebx
+
+...
+---
+name: test1lea_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+ - { reg: '%rbx' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+
+ %rbx = LEA64r killed %rbp, 1, killed %rax, 0, _
+ RETQ %ebx
+
+...
+---
+name: test8
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rdi' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rdi, %rbp
+ ; CHECK: %r12 = LEA64r _, 2, killed %r13, 5, _
+ ; CHECK: %r12 = ADD64rr %r12, killed %rbp
+ %rbp = KILL %rbp, implicit-def %rbp
+ %r13 = KILL %rdi, implicit-def %r13
+ %r12 = LEA64r killed %rbp, 2, killed %r13, 5, _
+ RETQ %r12
+
+...
+---
+name: testleaaddi32_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
+ ; CHECK: %eax = ADD32ri %eax, 129
+
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, _
+ RETQ %eax
+
+...
+---
+name: test1mov1add_rbp_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index2_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %eax, %ebp, %ebx
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+
+ %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: test2addi32_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp
+ ; CHECK: %rax = ADD64rr %rax, killed %rbp
+ ; CHECK: %rax = ADD64ri32 %rax, 129
+
+ %rax = LEA64r killed %rax, 1, killed %rbp, 129, _
+ RETQ %eax
+
+...
+---
+name: test1mov1add_rbp_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rax' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = MOV64rr %rbp
+ ; CHECK: %rbx = ADD64rr %rbx, %rbp
+
+ %rbx = LEA64r %rbp, 1, %rbp, 0, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = LEA64r _, 1, %rbp, 5, _
+ ; CHECK: %rbx = ADD64rr %rbx, %rbp
+
+ %rbx = LEA64r %rbp, 1, %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: testleaadd_rbp_index2_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = LEA64r _, 4, %rbp, 5, _
+ ; CHECK: %rbx = ADD64rr %rbx, %rbp
+
+ %rbx = LEA64r %rbp, 4, %rbp, 5, _
+ RETQ %ebx
+
+...
+---
+name: test_skip_opt_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+
+ %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_eflags_64
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbp' }
+ - { reg: '%rax' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
+ ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %rbp = ADD64ri8 %rbp, 5
+
+ CMP64rr %rax, killed %rbx, implicit-def %eflags
+ %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
+ JE_1 %bb.1, implicit %eflags
+ RETQ %ebx
+ bb.1:
+ liveins: %rax, %rbp, %rbx
+ %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_opt_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbx' }
+ - { reg: '%rbp' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+
+ %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+ RETQ %ebp
+
+...
+---
+name: test_skip_eflags_64_32
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%rbp' }
+ - { reg: '%rax' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0 (%ir-block.0):
+ liveins: %rax, %rbp, %rbx
+ ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
+ ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %ebp = ADD32ri8 %ebp, 5
+
+ CMP64rr %rax, killed %rbx, implicit-def %eflags
+ %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
+ JE_1 %bb.1, implicit %eflags
+ RETQ %ebx
+ bb.1:
+ liveins: %rax, %rbp, %rbx
+ %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, _
+ RETQ %ebp
+
+...
+
+
+
diff --git a/test/CodeGen/X86/lrshrink.ll b/test/CodeGen/X86/lrshrink.ll
deleted file mode 100644
index a9cf086dbd900..0000000000000
--- a/test/CodeGen/X86/lrshrink.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
-
-; Checks if "%7 = add nuw nsw i64 %4, %2" is moved before the last call
-; to minimize live-range.
-
-define i64 @test(i1 %a, i64 %r1, i64 %r2, i64 %s1, i64 %s2, i64 %t1, i64 %t2) {
-entry:
- br i1 %a, label %then, label %else
-
-then:
- br label %else
-
-else:
- %0 = phi i64 [ 4, %entry ], [ 10, %then ]
- %r = phi i64 [ %r1, %entry ], [ %r2, %then ]
- %s = phi i64 [ %s1, %entry ], [ %s2, %then ]
- %t = phi i64 [ %t1, %entry ], [ %t2, %then ]
-; CHECK-LABEL: test:
-; CHECK: add
-; CHECK: add
-; CHECK: call
-; CHECK: add
-; CHECK: call
-; CHECK: add
-; CHECK: call
-; CHECK: add
- %1 = tail call i32 @_Z3foov()
- %2 = zext i32 %1 to i64
- %3 = tail call i32 @_Z3foov()
- %4 = zext i32 %3 to i64
- %5 = tail call i32 @_Z3foov()
- %6 = zext i32 %5 to i64
- %7 = add nuw nsw i64 %0, %r
- tail call void @llvm.dbg.value(metadata i64 %7, i64 0, metadata !5, metadata !DIExpression()), !dbg !6
- %8 = add nuw nsw i64 %2, %7
- %9 = add nuw nsw i64 %4, %8
- %10 = add nuw nsw i64 %6, %9
- %11 = add nuw nsw i64 %s, %t
- tail call void @llvm.dbg.value(metadata i64 %11, i64 0, metadata !5, metadata !DIExpression()), !dbg !6
- %12 = add nuw nsw i64 %10, %11
- ret i64 %12
-}
-
-declare i32 @_Z3foov()
-declare void @llvm.dbg.value(metadata, i64, metadata, metadata)
-
-!llvm.dbg.cu = !{!0}
-!llvm.module.flags = !{!1, !2}
-
-!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, emissionKind: FullDebug)
-!1 = !{i32 2, !"Dwarf Version", i32 4}
-!2 = !{i32 2, !"Debug Info Version", i32 3}
-!3 = !DIFile(filename: "a.c", directory: "./")
-!4 = distinct !DISubprogram(name: "test", scope: !3, unit: !0)
-!5 = !DILocalVariable(name: "x", scope: !4)
-!6 = !DILocation(line: 4, scope: !4)
diff --git a/test/CodeGen/X86/madd.ll b/test/CodeGen/X86/madd.ll
index af86df5100165..d332b2f3169f0 100644
--- a/test/CodeGen/X86/madd.ll
+++ b/test/CodeGen/X86/madd.ll
@@ -129,9 +129,9 @@ define i32 @test_unsigned_short(i16* nocapture readonly, i16* nocapture readonly
; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
-; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: addq $16, %rsi
; SSE2-NEXT: addq $16, %rdi
; SSE2-NEXT: addq $-8, %rax
@@ -246,23 +246,23 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
; SSE2-NEXT: pmullw %xmm4, %xmm5
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSE2-NEXT: psrad $16, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm5
+; SSE2-NEXT: movq {{.*#+}} xmm6 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm6
+; SSE2-NEXT: movq {{.*#+}} xmm7 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psraw $8, %xmm7
+; SSE2-NEXT: pmullw %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; SSE2-NEXT: psrad $16, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: paddd %xmm6, %xmm3
+; SSE2-NEXT: paddd %xmm5, %xmm1
; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psraw $8, %xmm4
-; SSE2-NEXT: movq {{.*#+}} xmm5 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psraw $8, %xmm5
-; SSE2-NEXT: pmullw %xmm4, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; SSE2-NEXT: psrad $16, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm2
; SSE2-NEXT: addq $16, %rsi
; SSE2-NEXT: addq $16, %rdi
; SSE2-NEXT: addq $-16, %rax
diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll
index c5de8dd96cbc7..91087f650ad6d 100644
--- a/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/test/CodeGen/X86/masked_gather_scatter.ll
@@ -300,8 +300,8 @@ define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) {
;
; KNL_32-LABEL: test6:
; KNL_32: # BB#0:
-; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vpmovsxdq %ymm1, %zmm2
+; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: kxnorw %k0, %k0, %k2
; KNL_32-NEXT: vpgatherqd (,%zmm2), %ymm1 {%k2}
; KNL_32-NEXT: vpscatterqd %ymm0, (,%zmm2) {%k1}
@@ -1575,7 +1575,7 @@ define <16 x float> @test29(float* %base, <16 x i32> %ind) {
; Check non-power-of-2 case. It should be scalarized.
declare <3 x i32> @llvm.masked.gather.v3i32.v3p0i32(<3 x i32*>, i32, <3 x i1>, <3 x i32>)
define <3 x i32> @test30(<3 x i32*> %base, <3 x i32> %ind, <3 x i1> %mask, <3 x i32> %src0) {
-; ALL-LABEL: test30:
+; ALL-LABEL: test30
; ALL-NOT: gather
%sext_ind = sext <3 x i32> %ind to <3 x i64>
@@ -1691,12 +1691,12 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i
; KNL_32-LABEL: test_gather_16i64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi4:
+; KNL_32-NEXT: .Lcfi0:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi5:
+; KNL_32-NEXT: .Lcfi1:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi6:
+; KNL_32-NEXT: .Lcfi2:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1814,12 +1814,12 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <
; KNL_32-LABEL: test_gather_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi7:
+; KNL_32-NEXT: .Lcfi3:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi8:
+; KNL_32-NEXT: .Lcfi4:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi9:
+; KNL_32-NEXT: .Lcfi5:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -1936,12 +1936,12 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> %
; KNL_32-LABEL: test_scatter_16i64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi10:
+; KNL_32-NEXT: .Lcfi6:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi11:
+; KNL_32-NEXT: .Lcfi7:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi12:
+; KNL_32-NEXT: .Lcfi8:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -2058,12 +2058,12 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou
; KNL_32-LABEL: test_scatter_16f64:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi13:
+; KNL_32-NEXT: .Lcfi9:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi14:
+; KNL_32-NEXT: .Lcfi10:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi15:
+; KNL_32-NEXT: .Lcfi11:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-64, %esp
; KNL_32-NEXT: subl $64, %esp
@@ -2139,12 +2139,12 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_32-LABEL: test_pr28312:
; KNL_32: # BB#0:
; KNL_32-NEXT: pushl %ebp
-; KNL_32-NEXT: .Lcfi16:
+; KNL_32-NEXT: .Lcfi12:
; KNL_32-NEXT: .cfi_def_cfa_offset 8
-; KNL_32-NEXT: .Lcfi17:
+; KNL_32-NEXT: .Lcfi13:
; KNL_32-NEXT: .cfi_offset %ebp, -8
; KNL_32-NEXT: movl %esp, %ebp
-; KNL_32-NEXT: .Lcfi18:
+; KNL_32-NEXT: .Lcfi14:
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-32, %esp
; KNL_32-NEXT: subl $32, %esp
diff --git a/test/CodeGen/X86/merge-consecutive-loads-128.ll b/test/CodeGen/X86/merge-consecutive-loads-128.ll
index 2f7714e63886f..71417694b0d4b 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-128.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-128.ll
@@ -270,9 +270,9 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s
; SSE2: # BB#0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_012u:
@@ -292,9 +292,9 @@ define <4 x float> @merge_4f32_f32_012u(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_012u:
@@ -321,9 +321,9 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s
; SSE2: # BB#0:
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSE41-LABEL: merge_4f32_f32_019u:
@@ -343,9 +343,9 @@ define <4 x float> @merge_4f32_f32_019u(float* %ptr) nounwind uwtable noinline s
; X32-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE1-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE1-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-SSE1-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X32-SSE1-NEXT: retl
;
; X32-SSE41-LABEL: merge_4f32_f32_019u:
diff --git a/test/CodeGen/X86/misched-matrix.ll b/test/CodeGen/X86/misched-matrix.ll
index 94bbe75702cb8..e62a1d04dad67 100644
--- a/test/CodeGen/X86/misched-matrix.ll
+++ b/test/CodeGen/X86/misched-matrix.ll
@@ -17,9 +17,9 @@
;
; TOPDOWN-LABEL: %for.body
; TOPDOWN: movl %{{.*}}, (
-; TOPDOWN-NOT: imull {{[0-9]*}}(
+; TOPDOWN: imull {{[0-9]*}}(
; TOPDOWN: movl %{{.*}}, 4(
-; TOPDOWN-NOT: imull {{[0-9]*}}(
+; TOPDOWN: imull {{[0-9]*}}(
; TOPDOWN: movl %{{.*}}, 8(
; TOPDOWN: movl %{{.*}}, 12(
; TOPDOWN-LABEL: %for.end
diff --git a/test/CodeGen/X86/mul-i1024.ll b/test/CodeGen/X86/mul-i1024.ll
index 340aa047c022b..87661004373f3 100644
--- a/test/CodeGen/X86/mul-i1024.ll
+++ b/test/CodeGen/X86/mul-i1024.ll
@@ -11,7 +11,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $2640, %esp # imm = 0xA50
+; X32-NEXT: subl $2632, %esp # imm = 0xA48
; X32-NEXT: movl 8(%ebp), %eax
; X32-NEXT: movl 64(%eax), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -58,7 +58,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl 20(%eax), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl 24(%eax), %ecx
-; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl 28(%eax), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl 32(%eax), %ecx
@@ -1992,7 +1992,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -2002,23 +2002,19 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %ecx, %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %edi, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: adcl %esi, %edi
-; X32-NEXT: movl $0, %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl %ebx, %edi
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
@@ -2035,8 +2031,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %eax, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl %ecx, %edx
-; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %eax, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -2045,157 +2047,144 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %eax, %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %eax, %ebx
+; X32-NEXT: addl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %ecx, %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %edx, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: adcl $0, %eax
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edi, %esi
-; X32-NEXT: adcl %ebx, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %ebx, %eax
+; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: setb %dl
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movzbl %dl, %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: addl %edi, %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl %esi, %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: addl %esi, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %edi, %esi
+; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl %edx, %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %eax, %ebx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edi, %eax
-; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %edi, %edx
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %ebx
-; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -2215,16 +2204,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl %edx, %esi
; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %edx, %edx
-; X32-NEXT: andl $1, %edx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
@@ -2246,7 +2234,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -2268,16 +2256,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
@@ -2306,112 +2293,97 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %edx
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl %eax, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edi, %edx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movzbl %al, %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %eax
+; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: adcl %ebx, %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %edi, %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl (%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: addl %esi, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %esi, %eax
+; X32-NEXT: movl %eax, %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edx, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -2429,16 +2401,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %edx
; X32-NEXT: addl %ebx, %esi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl $0, %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -2447,16 +2417,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: movl %edi, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: movl %eax, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %ecx, %eax
-; X32-NEXT: movl %eax, %edi
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -2467,10 +2437,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edx, %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %edi, %edx
-; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl %edi, %eax
; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
@@ -2485,52 +2455,50 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %edx
+; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X32-NEXT: movl (%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl %edi, %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: adcl %esi, %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, (%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -2550,16 +2518,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
@@ -2582,7 +2549,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
@@ -2603,16 +2570,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
@@ -2639,105 +2605,88 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: adcl $0, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: adcl $0, %eax
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %edx, %esi
+; X32-NEXT: adcl %ecx, %edi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %al, %ebx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, %eax
+; X32-NEXT: movl %eax, %edi
+; X32-NEXT: adcl %ebx, %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, %eax
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -2766,76 +2715,70 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X32-NEXT: addl %eax, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl %edx, %eax
+; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: addl %eax, %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %eax, %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %ebx, %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
-; X32-NEXT: addl (%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl %ebx, %edi
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: setb %dl
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movzbl %dl, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %ecx, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, %ebx
; X32-NEXT: addl %eax, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
-; X32-NEXT: movl %ecx, %edi
-; X32-NEXT: adcl %eax, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %eax, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %edx, %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl %ecx, %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %ecx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
@@ -2847,55 +2790,53 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %ebx
+; X32-NEXT: adcl %esi, %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -2915,20 +2856,19 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
@@ -2968,16 +2908,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
@@ -3004,109 +2943,87 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: adcl $0, %edi
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: adcl $0, %eax
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl %edi, %esi
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzbl %al, %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %edi, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: addl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ebx, %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %ecx, %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -3127,56 +3044,33 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
@@ -3214,37 +3108,35 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %eax, %ecx
-; X32-NEXT: movl $0, %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: addl %edx, %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -3264,38 +3156,37 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %edi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl (%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %ecx
-; X32-NEXT: movl (%esp), %edx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: adcl %esi, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: adcl %ebx, %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
@@ -3319,15 +3210,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
@@ -3337,118 +3227,113 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: addl %edx, %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl %esi, %ebx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl %ecx, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl (%esp), %eax # 4-byte Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ebx, %eax
; X32-NEXT: adcl $0, %eax
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl %esi, %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %ecx, %esi
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: setb %cl
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %cl, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edi, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %edi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, %edx
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, %eax
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl %ebx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -3462,36 +3347,34 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
@@ -3500,18 +3383,17 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %ecx, %edi
+; X32-NEXT: adcl %eax, %esi
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
@@ -3555,14 +3437,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
@@ -3612,136 +3493,120 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl %edi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %edx, %eax
+; X32-NEXT: adcl %esi, %ecx
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %edx, %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %esi, %edx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl $0, %esi
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %ecx
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edi, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %ebx, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -3760,16 +3625,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %edi, %edx
; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -3786,79 +3650,80 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: addl %eax, %edx
-; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl %edx, %edi
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %esi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -3877,35 +3742,35 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl %edx, %eax
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
+; X32-NEXT: setb %dl
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movzbl %dl, %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl %edi, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %esi, %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -3925,44 +3790,43 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl %ecx, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ebx, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: adcl %esi, %edi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
@@ -3986,15 +3850,14 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %esi
; X32-NEXT: addl %eax, %edx
; X32-NEXT: adcl %ecx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
@@ -4007,10 +3870,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: addl %edx, %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl %ebx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, %edx
+; X32-NEXT: adcl %ebx, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4025,116 +3888,107 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl $0, %eax
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: adcl $0, %edx
; X32-NEXT: adcl $0, %esi
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: setb {{[0-9]+}}(%esp) # 1-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: addl %eax, %ebx
-; X32-NEXT: adcl %ecx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %edx, %ecx
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %ebx, %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -4156,10 +4010,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
@@ -4168,45 +4022,46 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: adcl $0, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: addl %ecx, %ebx
-; X32-NEXT: adcl %edx, %eax
-; X32-NEXT: movl $0, %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: sbbl %edi, %edi
-; X32-NEXT: andl $1, %edi
+; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: adcl %ecx, %eax
+; X32-NEXT: setb %cl
; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl %ebx, %ecx
+; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: adcl %edi, %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: adcl %edx, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4216,15 +4071,16 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4236,25 +4092,21 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
@@ -4264,6 +4116,11 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4278,13 +4135,13 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
@@ -4292,10 +4149,6 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
@@ -4304,6 +4157,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
@@ -4312,67 +4169,66 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl 16(%ebp), %ebx
-; X32-NEXT: movl %ecx, 4(%ebx)
-; X32-NEXT: movl 16(%ebp), %ecx
-; X32-NEXT: movl %eax, (%ecx)
+; X32-NEXT: movl 16(%ebp), %edx
+; X32-NEXT: movl %ecx, 4(%edx)
+; X32-NEXT: movl %eax, (%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 8(%ecx)
+; X32-NEXT: movl %eax, 8(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 12(%ecx)
+; X32-NEXT: movl %eax, 12(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 16(%ecx)
+; X32-NEXT: movl %eax, 16(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 20(%ecx)
+; X32-NEXT: movl %eax, 20(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 24(%ecx)
+; X32-NEXT: movl %eax, 24(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 28(%ecx)
+; X32-NEXT: movl %eax, 28(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 32(%ecx)
+; X32-NEXT: movl %eax, 32(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 36(%ecx)
+; X32-NEXT: movl %eax, 36(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 40(%ecx)
+; X32-NEXT: movl %eax, 40(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 44(%ecx)
+; X32-NEXT: movl %eax, 44(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 48(%ecx)
+; X32-NEXT: movl %eax, 48(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 52(%ecx)
+; X32-NEXT: movl %eax, 52(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 56(%ecx)
+; X32-NEXT: movl %eax, 56(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 60(%ecx)
+; X32-NEXT: movl %eax, 60(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 64(%ecx)
+; X32-NEXT: movl %eax, 64(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 68(%ecx)
+; X32-NEXT: movl %eax, 68(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 72(%ecx)
+; X32-NEXT: movl %eax, 72(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 76(%ecx)
+; X32-NEXT: movl %eax, 76(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 80(%ecx)
+; X32-NEXT: movl %eax, 80(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 84(%ecx)
+; X32-NEXT: movl %eax, 84(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 88(%ecx)
+; X32-NEXT: movl %eax, 88(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 92(%ecx)
+; X32-NEXT: movl %eax, 92(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 96(%ecx)
-; X32-NEXT: movl %edx, 100(%ecx)
+; X32-NEXT: movl %eax, 96(%edx)
+; X32-NEXT: movl %esi, 100(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 104(%ecx)
-; X32-NEXT: movl %esi, 108(%ecx)
+; X32-NEXT: movl %eax, 104(%edx)
+; X32-NEXT: movl %edi, 108(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 112(%ecx)
-; X32-NEXT: movl %edi, 116(%ecx)
+; X32-NEXT: movl %eax, 112(%edx)
+; X32-NEXT: movl %ebx, 116(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 120(%ecx)
+; X32-NEXT: movl %eax, 120(%edx)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: movl %eax, 124(%ecx)
+; X32-NEXT: movl %eax, 124(%edx)
; X32-NEXT: leal -12(%ebp), %esp
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
@@ -4390,40 +4246,41 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: pushq %rbx
; X64-NEXT: subq $352, %rsp # imm = 0x160
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq 48(%rdi), %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq 40(%rdi), %rcx
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq 48(%rdi), %r9
+; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq 40(%rdi), %rbp
+; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq 32(%rdi), %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdi, %r13
-; X64-NEXT: xorl %r9d, %r9d
-; X64-NEXT: mulq %r9
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %r11
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %r9
+; X64-NEXT: movq %rdi, %r10
+; X64-NEXT: xorl %r8d, %r8d
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: addq %rbx, %rcx
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %rdi, %rbx
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r11, %rcx
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rbx, %rbp
-; X64-NEXT: movq %rbx, %rcx
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, %r11
+; X64-NEXT: adcq %rdi, %rbp
+; X64-NEXT: setb %bl
+; X64-NEXT: movzbl %bl, %ebx
; X64-NEXT: addq %rax, %rbp
; X64-NEXT: adcq %rdx, %rbx
-; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %r9
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %r8
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %r11, %r12
+; X64-NEXT: movq %r11, %r8
; X64-NEXT: addq %rax, %r12
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %rcx, %r8
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, %r9
+; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: addq %rbp, %r12
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
@@ -4433,186 +4290,182 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: xorl %ebp, %ebp
; X64-NEXT: mulq %rbp
-; X64-NEXT: movq %rax, %r10
+; X64-NEXT: movq %rax, %rdi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq 8(%rsi), %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rbp
-; X64-NEXT: xorl %r9d, %r9d
+; X64-NEXT: xorl %r11d, %r11d
; X64-NEXT: movq %rax, %r15
; X64-NEXT: addq %rcx, %r15
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r10, %r15
+; X64-NEXT: addq %rdi, %r15
; X64-NEXT: adcq %rcx, %rbp
-; X64-NEXT: movq %rcx, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: setb %bl
; X64-NEXT: addq %rax, %rbp
+; X64-NEXT: movzbl %bl, %ebx
; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: movq 16(%rsi), %rax
-; X64-NEXT: movq %rsi, %r14
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rsi, %r13
+; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r9
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r10, %rcx
-; X64-NEXT: movq %rcx, %rsi
-; X64-NEXT: addq %rax, %rsi
-; X64-NEXT: movq %rdi, %r9
-; X64-NEXT: adcq %rdx, %r9
-; X64-NEXT: addq %rbp, %rsi
-; X64-NEXT: movq %rsi, %r10
-; X64-NEXT: adcq %rbx, %r9
-; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %rax
-; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rdi, %r8
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %rbp
-; X64-NEXT: movq (%rbp), %rax
+; X64-NEXT: movq %rdi, %r14
+; X64-NEXT: addq %rax, %r14
+; X64-NEXT: movq %rcx, %r11
+; X64-NEXT: adcq %rdx, %r11
+; X64-NEXT: addq %rbp, %r14
+; X64-NEXT: adcq %rbx, %r11
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: movq %r8, %rbp
+; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: adcq %rcx, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq (%r10), %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rax
+; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: movq %rdi, %r9
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: adcq %rdi, %rax
+; X64-NEXT: adcq %rcx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq 32(%r14), %rax
+; X64-NEXT: movq 32(%r13), %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8
+; X64-NEXT: xorl %r8d, %r8d
; X64-NEXT: movq %rax, %r13
-; X64-NEXT: movq %rdx, (%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %rbx, %r14
+; X64-NEXT: movq %rbx, %rcx
; X64-NEXT: addq %r13, %rax
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: adcq %rdx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: addq %rcx, %r11
-; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rcx, %r8
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: addq %r9, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %r15, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r10, %r12
-; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r10, %rcx
+; X64-NEXT: adcq %r14, %r12
+; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: adcq %r9, %rax
+; X64-NEXT: adcq %r11, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r9, %r10
-; X64-NEXT: movq 8(%rbp), %rax
+; X64-NEXT: movq %r11, %rdi
+; X64-NEXT: movq 8(%r10), %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rbp, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: mulq %rdx
-; X64-NEXT: xorl %r9d, %r9d
-; X64-NEXT: movq %rax, %r12
-; X64-NEXT: addq %rsi, %r12
+; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rax, %r11
+; X64-NEXT: addq %rsi, %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r14, %r12
+; X64-NEXT: addq %rcx, %r11
; X64-NEXT: adcq %rsi, %rbp
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: setb %bl
; X64-NEXT: addq %rax, %rbp
+; X64-NEXT: movzbl %bl, %ebx
; X64-NEXT: adcq %rdx, %rbx
-; X64-NEXT: movq 16(%rdi), %rax
+; X64-NEXT: movq 16(%r10), %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r9
-; X64-NEXT: xorl %edi, %edi
+; X64-NEXT: mulq %r8
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, %r8
+; X64-NEXT: addq %rax, %r8
+; X64-NEXT: movq %rsi, %r10
+; X64-NEXT: adcq %rdx, %r10
+; X64-NEXT: addq %rbp, %r8
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: adcq %rbx, %r10
+; X64-NEXT: movq %rcx, %rdx
+; X64-NEXT: movq %rcx, %r12
+; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %r9, %rdx
; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r14, %r9
-; X64-NEXT: addq %rax, %r9
-; X64-NEXT: adcq %rdx, %rsi
-; X64-NEXT: addq %rbp, %r9
-; X64-NEXT: movq %r9, %rdx
-; X64-NEXT: adcq %rbx, %rsi
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %r14, %rsi
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: addq %r8, %rsi
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r12, %r15
+; X64-NEXT: movq %r11, %r8
+; X64-NEXT: adcq %r8, %r15
; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, %r15
-; X64-NEXT: adcq %rax, %r10
-; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: adcq %rax, %r14
+; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, %rcx
+; X64-NEXT: adcq %r10, %rdi
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq 40(%rsi), %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rdi
-; X64-NEXT: xorl %r8d, %r8d
-; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: movq (%rsp), %rdi # 8-byte Reload
-; X64-NEXT: addq %rdi, %rcx
+; X64-NEXT: xorl %r14d, %r14d
+; X64-NEXT: mulq %r14
+; X64-NEXT: movq %rax, %rdi
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: addq %r9, %rdi
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r13, %rcx
-; X64-NEXT: adcq %rdi, %rbp
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: addq %r13, %rdi
+; X64-NEXT: adcq %r9, %rbp
+; X64-NEXT: setb %bl
; X64-NEXT: addq %rax, %rbp
-; X64-NEXT: adcq %rdx, %rbx
+; X64-NEXT: movzbl %bl, %r11d
+; X64-NEXT: adcq %rdx, %r11
; X64-NEXT: movq 48(%rsi), %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %r14
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r13, %r8
-; X64-NEXT: addq %rax, %r8
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: adcq %rdx, %rax
-; X64-NEXT: addq %rbp, %r8
-; X64-NEXT: adcq %rbx, %rax
-; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: addq %r13, %r14
-; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rcx, %r12
+; X64-NEXT: movq %r13, %rbx
+; X64-NEXT: addq %rax, %rbx
+; X64-NEXT: movq %r9, %rsi
+; X64-NEXT: adcq %rdx, %rsi
+; X64-NEXT: addq %rbp, %rbx
+; X64-NEXT: adcq %r11, %rsi
+; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %r13, %r12
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r8, %r15
-; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rax, %r10
+; X64-NEXT: adcq %rdi, %r8
+; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rbx, %rcx
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rsi, %r10
; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; X64-NEXT: movq %rdx, %rax
; X64-NEXT: addq %r13, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: adcq %rdi, %rax
+; X64-NEXT: movq (%rsp), %rax # 8-byte Reload
+; X64-NEXT: adcq %r9, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rdx, %rax
; X64-NEXT: addq %r13, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
+; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rax, %r8
+; X64-NEXT: movq %rax, %r9
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: movq 56(%rax), %r11
; X64-NEXT: movq %r11, %rax
+; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdi, %r9
+; X64-NEXT: movq %rdi, %r10
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rsi, %rbx
@@ -4621,15 +4474,15 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r10
-; X64-NEXT: addq %rbx, %r10
+; X64-NEXT: movq %rax, %r8
+; X64-NEXT: addq %rbx, %r8
; X64-NEXT: adcq %rbp, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
+; X64-NEXT: setb %cl
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdi, %r14
+; X64-NEXT: movq %rdi, %r11
; X64-NEXT: addq %rsi, %rax
+; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
; X64-NEXT: addq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
@@ -4637,299 +4490,297 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %r15
; X64-NEXT: adcq %rdx, %r12
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %r10, %rbp
+; X64-NEXT: mulq %rbp
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %r9, %rcx
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rcx
+; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %r9, %rbx
+; X64-NEXT: addq %rsi, %rbx
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r14
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rcx, %r10
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rcx
-; X64-NEXT: sbbq %rbp, %rbp
-; X64-NEXT: andl $1, %ebp
-; X64-NEXT: movq %rsi, %rbx
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: mulq %r14
+; X64-NEXT: setb %bl
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %r13
; X64-NEXT: movq %rax, %rsi
; X64-NEXT: addq %rcx, %rsi
-; X64-NEXT: adcq %rbp, %r13
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
+; X64-NEXT: movzbl %bl, %eax
+; X64-NEXT: adcq %rax, %r13
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
-; X64-NEXT: addq %r8, %rsi
-; X64-NEXT: adcq %r10, %r13
+; X64-NEXT: addq %r9, %rsi
+; X64-NEXT: adcq %r8, %r13
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12
-; X64-NEXT: movq %rdi, %rbp
-; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
-; X64-NEXT: mulq %r9
+; X64-NEXT: movq %r10, %rbx
+; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %rbx, %r14
-; X64-NEXT: mulq %r9
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, %r9
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rdi
-; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: movq %rax, %rbp
+; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq $0, %rdi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: movq 24(%rax), %rcx
-; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rcx, %rbp
-; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rcx, %rbx
+; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r8
-; X64-NEXT: addq %rbx, %r8
+; X64-NEXT: addq %rbp, %r8
; X64-NEXT: adcq %rdi, %rcx
-; X64-NEXT: sbbq %rdi, %rdi
-; X64-NEXT: andl $1, %edi
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: setb %dil
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %rbx
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rdi, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: movzbl %dil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload
-; X64-NEXT: adcq %r14, %rbp
-; X64-NEXT: addq %rax, %rbx
-; X64-NEXT: adcq %rdx, %rbp
+; X64-NEXT: addq %r14, %rbp
+; X64-NEXT: movq (%rsp), %rbx # 8-byte Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: adcq %r9, %rbx
+; X64-NEXT: addq %rax, %rbp
+; X64-NEXT: adcq %rdx, %rbx
; X64-NEXT: addq %rsi, %r10
; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %r13, %r8
; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq $0, %rbx
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: addq %r15, %rbx
-; X64-NEXT: adcq %r12, %rbp
-; X64-NEXT: movl $0, %r8d
-; X64-NEXT: adcq $0, %r8
-; X64-NEXT: sbbq %r10, %r10
-; X64-NEXT: andl $1, %r10d
+; X64-NEXT: adcq $0, %rbx
+; X64-NEXT: addq %r15, %rbp
+; X64-NEXT: adcq %r12, %rbx
+; X64-NEXT: setb %r15b
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r9, %rsi
+; X64-NEXT: movq %r11, %rsi
; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq %rax, %r15
-; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %rax
+; X64-NEXT: movq %rdx, %r11
+; X64-NEXT: movq %rax, %r13
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
+; X64-NEXT: movq %r12, %rax
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %r9, %rdi
+; X64-NEXT: addq %r11, %rdi
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq %rax, %r13
-; X64-NEXT: addq %rdi, %r13
-; X64-NEXT: adcq %rsi, %r9
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %r11, %rax
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: addq %r9, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rax, %r11
+; X64-NEXT: addq %rdi, %r11
+; X64-NEXT: adcq %rsi, %rcx
+; X64-NEXT: setb %sil
+; X64-NEXT: movq %r12, %rax
+; X64-NEXT: mulq %r8
+; X64-NEXT: movq %r8, %r12
+; X64-NEXT: addq %rcx, %rax
+; X64-NEXT: movzbl %sil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: adcq %r14, %rsi
+; X64-NEXT: addq %r14, %rcx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: adcq %r9, %r14
; X64-NEXT: addq %rax, %rcx
-; X64-NEXT: adcq %rdx, %rsi
-; X64-NEXT: addq %rbx, %r15
-; X64-NEXT: adcq %rbp, %r13
-; X64-NEXT: adcq %r8, %rcx
-; X64-NEXT: adcq %r10, %rsi
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
+; X64-NEXT: adcq %rdx, %r14
+; X64-NEXT: addq %rbp, %r13
+; X64-NEXT: adcq %rbx, %r11
+; X64-NEXT: movzbl %r15b, %eax
+; X64-NEXT: adcq %rax, %rcx
+; X64-NEXT: adcq $0, %r14
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
+; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rax, %r9
-; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: movq 24(%rax), %rbp
-; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, %r14
+; X64-NEXT: movq %rdx, %rbx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq 24(%rax), %rcx
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rsi, %r14
+; X64-NEXT: movq %rsi, %r11
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: movq %rax, %rbp
+; X64-NEXT: addq %rbx, %rbp
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: mulq %r9
+; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %r15
-; X64-NEXT: addq %rbx, %r15
-; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdi, %r11
-; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: addq %rbp, %r15
+; X64-NEXT: adcq %rsi, %rbx
+; X64-NEXT: setb %sil
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: mulq %r9
+; X64-NEXT: addq %rbx, %rax
+; X64-NEXT: movzbl %sil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload
; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %r8
; X64-NEXT: adcq %rdx, %r10
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %r14, %rsi
-; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %r11, %rbp
+; X64-NEXT: mulq %rbp
+; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: mulq %rbp
+; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rbx
-; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r11
-; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: addq %rdi, %rbx
+; X64-NEXT: adcq $0, %rbp
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rcx, %r11
+; X64-NEXT: mulq %r9
+; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %r11
-; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: addq %rcx, %rsi
-; X64-NEXT: adcq %rbx, %r11
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: addq %r9, %rsi
-; X64-NEXT: adcq %r15, %r11
+; X64-NEXT: adcq %rbp, %rdi
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rsi, %rbp
+; X64-NEXT: mulq %r9
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %rdi, %rbx
+; X64-NEXT: movzbl %cl, %eax
+; X64-NEXT: adcq %rax, %rsi
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
+; X64-NEXT: addq %r14, %rbx
+; X64-NEXT: adcq %r15, %rsi
; X64-NEXT: adcq $0, %r8
; X64-NEXT: adcq $0, %r10
-; X64-NEXT: movq %rdi, %rbx
-; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq %r11, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %rbp, %r14
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdi, %r15
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
; X64-NEXT: adcq $0, %rdi
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %r12, %r13
-; X64-NEXT: mulq %r13
+; X64-NEXT: movq %r11, %rax
+; X64-NEXT: mulq %r12
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbp, %rax
-; X64-NEXT: movq %rax, %rbp
+; X64-NEXT: movq %rax, %r11
; X64-NEXT: adcq %rdi, %rcx
-; X64-NEXT: sbbq %rdi, %rdi
-; X64-NEXT: andl $1, %edi
+; X64-NEXT: setb %dil
; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %r13
+; X64-NEXT: mulq %r12
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rdi, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: movzbl %dil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: addq %r13, %rdi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload
-; X64-NEXT: addq %r14, %rbx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: addq %rax, %rbx
-; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: addq %rsi, %r9
+; X64-NEXT: adcq %r14, %rbp
+; X64-NEXT: addq %rax, %rdi
+; X64-NEXT: adcq %rdx, %rbp
+; X64-NEXT: addq %rbx, %r9
; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r11, %rbp
-; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq $0, %rbx
-; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: addq %r8, %rbx
-; X64-NEXT: adcq %r10, %rcx
-; X64-NEXT: movl $0, %r12d
-; X64-NEXT: adcq $0, %r12
-; X64-NEXT: sbbq %r9, %r9
-; X64-NEXT: andl $1, %r9d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: adcq %rsi, %r11
+; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq $0, %rdi
+; X64-NEXT: adcq $0, %rbp
+; X64-NEXT: addq %r8, %rdi
+; X64-NEXT: adcq %r10, %rbp
+; X64-NEXT: setb %r9b
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r15
-; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: movq %rdx, %r10
; X64-NEXT: movq %rax, %r11
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
-; X64-NEXT: movq %r10, %rax
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %r8, %rdi
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %r10, %rbx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: mulq %r13
-; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: mulq %r12
+; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r15
-; X64-NEXT: addq %rdi, %r15
-; X64-NEXT: adcq %rsi, %r8
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %r10, %rax
-; X64-NEXT: movq %r10, %rbp
-; X64-NEXT: mulq %r13
-; X64-NEXT: addq %r8, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: addq %rbx, %r15
+; X64-NEXT: adcq %rsi, %rcx
+; X64-NEXT: setb %bl
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: mulq %r12
+; X64-NEXT: addq %rcx, %rax
+; X64-NEXT: movzbl %bl, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
-; X64-NEXT: movq %r10, %rsi
-; X64-NEXT: addq %r14, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
-; X64-NEXT: movq %r8, %rdi
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: addq %rax, %rsi
-; X64-NEXT: adcq %rdx, %rdi
-; X64-NEXT: addq %rbx, %r11
-; X64-NEXT: adcq %rcx, %r15
-; X64-NEXT: adcq %r12, %rsi
-; X64-NEXT: adcq %r9, %rdi
+; X64-NEXT: movq %r10, %rcx
+; X64-NEXT: addq %r13, %rcx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: movq %rbx, %rsi
+; X64-NEXT: movq %rbx, %r12
+; X64-NEXT: adcq %r14, %rsi
+; X64-NEXT: addq %rax, %rcx
+; X64-NEXT: adcq %rdx, %rsi
+; X64-NEXT: addq %rdi, %r11
+; X64-NEXT: adcq %rbp, %r15
+; X64-NEXT: movzbl %r9b, %eax
+; X64-NEXT: adcq %rax, %rcx
+; X64-NEXT: adcq $0, %rsi
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
; X64-NEXT: addq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: adcq $0, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
; X64-NEXT: adcq $0, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
@@ -4937,9 +4788,10 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r14
+; X64-NEXT: movq %r8, %rbp
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rcx, %r13
+; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: addq %rsi, %rcx
@@ -4948,291 +4800,279 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r11
-; X64-NEXT: addq %rcx, %r11
+; X64-NEXT: movq %rax, %r8
+; X64-NEXT: addq %rcx, %r8
; X64-NEXT: adcq %rbx, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
+; X64-NEXT: setb %cl
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdi, %r15
; X64-NEXT: addq %rsi, %rax
+; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq %r10, %r9
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload
-; X64-NEXT: movq %r8, %r12
-; X64-NEXT: adcq (%rsp), %r12 # 8-byte Folded Reload
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload
+; X64-NEXT: movq %r12, %r10
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %r9
-; X64-NEXT: adcq %rdx, %r12
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r13, %rbp
-; X64-NEXT: mulq %rbp
-; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: adcq %rdx, %r10
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: mulq %r11
+; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: movq %r13, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %r8, %rbx
+; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %rcx, %rbp
+; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %r13, %rax
+; X64-NEXT: setb %sil
+; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r15
-; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: movq %rdx, %r15
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rcx, %rbx
-; X64-NEXT: adcq %rsi, %r8
+; X64-NEXT: movzbl %sil, %eax
+; X64-NEXT: adcq %rax, %r15
; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
; X64-NEXT: addq %r14, %rbx
-; X64-NEXT: adcq %r11, %r8
+; X64-NEXT: adcq %r8, %r15
; X64-NEXT: adcq $0, %r9
-; X64-NEXT: adcq $0, %r12
+; X64-NEXT: adcq $0, %r10
; X64-NEXT: movq %rbp, %rsi
; X64-NEXT: movq %rsi, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r14
-; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq %r13, %rax
+; X64-NEXT: movq %rax, %r12
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, %r8
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %rdi
+; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: addq %r14, %rcx
-; X64-NEXT: adcq $0, %rdi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: movq 56(%rax), %r15
+; X64-NEXT: adcq $0, %rbp
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: movq 56(%rax), %rdi
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %r15
+; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: movq %rax, %r11
-; X64-NEXT: adcq %rdi, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
-; X64-NEXT: movq %r13, %rax
-; X64-NEXT: mulq %r15
+; X64-NEXT: movq %rax, %r14
+; X64-NEXT: addq %rcx, %r14
+; X64-NEXT: adcq %rbp, %rsi
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdi, %r8
; X64-NEXT: addq %rsi, %rax
+; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: addq %r13, %rdi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: addq %r11, %rcx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: adcq %rbp, %rsi
-; X64-NEXT: addq %rax, %rdi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: adcq %r13, %rsi
+; X64-NEXT: addq %rax, %rcx
; X64-NEXT: adcq %rdx, %rsi
-; X64-NEXT: addq %rbx, %r10
-; X64-NEXT: movq %r10, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r8, %r11
-; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq $0, %rdi
+; X64-NEXT: addq %rbx, %r12
+; X64-NEXT: adcq %r15, %r14
+; X64-NEXT: adcq $0, %rcx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: addq %r9, %rdi
-; X64-NEXT: adcq %r12, %rsi
-; X64-NEXT: movl $0, %r14d
-; X64-NEXT: adcq $0, %r14
-; X64-NEXT: sbbq %r10, %r10
-; X64-NEXT: andl $1, %r10d
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r8
-; X64-NEXT: movq %rax, %r12
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
-; X64-NEXT: movq %r9, %rax
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: addq %r8, %rcx
-; X64-NEXT: adcq $0, %r11
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r15
-; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: addq %r9, %rcx
+; X64-NEXT: adcq %r10, %rsi
+; X64-NEXT: setb {{[0-9]+}}(%rsp) # 1-byte Folded Spill
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %r9
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload
+; X64-NEXT: movq %r10, %rax
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %r15
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rbx
-; X64-NEXT: adcq %r11, %r8
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
-; X64-NEXT: movq %r9, %rax
-; X64-NEXT: mulq %r15
-; X64-NEXT: addq %r8, %rax
-; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: addq %r9, %rbx
+; X64-NEXT: adcq $0, %r15
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %r8, %rdi
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %r9
+; X64-NEXT: movq %rax, %r8
+; X64-NEXT: addq %rbx, %r8
+; X64-NEXT: adcq %r15, %r9
+; X64-NEXT: setb %bl
+; X64-NEXT: movq %r10, %rax
+; X64-NEXT: mulq %rdi
+; X64-NEXT: addq %r9, %rax
+; X64-NEXT: movzbl %bl, %edi
+; X64-NEXT: adcq %rdi, %rdx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
-; X64-NEXT: addq %r13, %r15
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; X64-NEXT: adcq %rbp, %r11
+; X64-NEXT: addq %r11, %r15
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: adcq %r13, %rbp
; X64-NEXT: addq %rax, %r15
-; X64-NEXT: adcq %rdx, %r11
-; X64-NEXT: addq %rdi, %r12
-; X64-NEXT: adcq %rsi, %rbx
-; X64-NEXT: adcq %r14, %r15
-; X64-NEXT: adcq %r10, %r11
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: adcq %rdx, %rbp
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; X64-NEXT: addq %rcx, %rdx
+; X64-NEXT: adcq %rsi, %r8
+; X64-NEXT: movzbl {{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload
+; X64-NEXT: adcq %rax, %r15
+; X64-NEXT: adcq $0, %rbp
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: addq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq %rcx, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq $0, %r12
-; X64-NEXT: adcq $0, %rbx
-; X64-NEXT: adcq $0, %r15
-; X64-NEXT: adcq $0, %r11
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: adcq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: movl $0, %eax
-; X64-NEXT: adcq $0, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movl $0, %eax
-; X64-NEXT: adcq $0, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movl $0, %eax
-; X64-NEXT: adcq $0, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: sbbq %rax, %rax
-; X64-NEXT: andl $1, %eax
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq $0, %rdx
+; X64-NEXT: adcq $0, %r8
+; X64-NEXT: adcq $0, %r15
+; X64-NEXT: adcq $0, %rbp
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
+; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r15 # 8-byte Folded Reload
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
+; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdx, %r8
-; X64-NEXT: movq %rax, %r13
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdi, %r14
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %r8, %rbp
-; X64-NEXT: adcq $0, %rbx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %rdx, %r11
+; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %rsi, %r10
+; X64-NEXT: movq %rdx, %rdi
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %r11, %rbx
+; X64-NEXT: adcq $0, %rdi
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload
-; X64-NEXT: mulq %r8
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r12
-; X64-NEXT: addq %rbp, %r12
-; X64-NEXT: adcq %rbx, %rcx
-; X64-NEXT: sbbq %rbp, %rbp
-; X64-NEXT: andl $1, %ebp
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: addq %rbx, %r12
+; X64-NEXT: adcq %rdi, %rcx
+; X64-NEXT: setb %bl
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %rsi, %r9
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rbp, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload
-; X64-NEXT: movq (%rsp), %r10 # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
-; X64-NEXT: addq %rax, %r9
-; X64-NEXT: adcq %rdx, %r10
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %r14, %rbx
-; X64-NEXT: mulq %rbx
-; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, (%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %rbx
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rcx, %rbp
-; X64-NEXT: adcq $0, %rbx
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %rdi, %r14
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: addq %rbp, %rax
+; X64-NEXT: movzbl %bl, %ecx
+; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: addq %rax, %r8
+; X64-NEXT: adcq %rdx, %rcx
+; X64-NEXT: movq %rcx, %r14
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %r10, %rdi
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rbx, %rcx
-; X64-NEXT: sbbq %rdi, %rdi
-; X64-NEXT: andl $1, %edi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rcx, %rbp
-; X64-NEXT: adcq %rdi, %rbx
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: addq %r13, %rbp
-; X64-NEXT: adcq %r12, %rbx
-; X64-NEXT: adcq $0, %r9
-; X64-NEXT: movq %r9, %r12
-; X64-NEXT: adcq $0, %r10
-; X64-NEXT: movq %r10, %r8
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %rdi
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %r11, %rbx
+; X64-NEXT: adcq $0, %rdi
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rcx, %r13
+; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, %r9
+; X64-NEXT: addq %rbx, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rdi, %rcx
+; X64-NEXT: setb %bl
; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %rsi, %r10
-; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: mulq %r9
+; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %rdi
; X64-NEXT: addq %rcx, %rdi
-; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movzbl %bl, %eax
+; X64-NEXT: adcq %rax, %r11
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
+; X64-NEXT: adcq %r12, %r11
+; X64-NEXT: adcq $0, %r8
+; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq $0, %r14
+; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r13, %rbx
+; X64-NEXT: movq %rbx, %rax
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r14
-; X64-NEXT: addq %rdi, %rax
-; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: adcq %rsi, %r14
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %r10, %rax
+; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: movq %rax, %r12
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %rsi, %r9
; X64-NEXT: mulq %rcx
-; X64-NEXT: addq %r14, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movq %rcx, %r10
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, %rcx
+; X64-NEXT: addq %r8, %rcx
+; X64-NEXT: adcq $0, %rsi
+; X64-NEXT: movq %rbx, %rax
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: mulq %r13
+; X64-NEXT: movq %rdx, %rbx
+; X64-NEXT: addq %rcx, %rax
+; X64-NEXT: movq %rax, %r8
+; X64-NEXT: adcq %rsi, %rbx
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %r13
+; X64-NEXT: movq %r13, %r9
+; X64-NEXT: addq %rbx, %rax
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: addq %r13, %rsi
+; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
-; X64-NEXT: addq %r14, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: adcq %r13, %rcx
+; X64-NEXT: adcq %r14, %rcx
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: addq %rbp, %r9
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rbx, %rdi
-; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %rdi, %r12
+; X64-NEXT: adcq %r11, %r8
+; X64-NEXT: movq %r8, %r11
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: addq %r12, %rsi
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r8, %rcx
-; X64-NEXT: movq %rcx, %r12
-; X64-NEXT: movl $0, %r10d
-; X64-NEXT: adcq $0, %r10
-; X64-NEXT: sbbq %r9, %r9
-; X64-NEXT: andl $1, %r9d
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, (%rsp) # 8-byte Spill
+; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq %r10, %rsi
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
@@ -5244,82 +5084,85 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rcx, %rdi
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rdi, %rbx
+; X64-NEXT: movq %rax, %r10
+; X64-NEXT: addq %rdi, %r10
; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
+; X64-NEXT: setb %bl
; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: mulq %r9
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movzbl %bl, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: addq %r14, %rsi
+; X64-NEXT: addq %r13, %rsi
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq %r13, %rcx
+; X64-NEXT: adcq %r14, %rcx
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
-; X64-NEXT: adcq %r12, %rbx
-; X64-NEXT: adcq %r10, %rsi
-; X64-NEXT: adcq %r9, %rcx
-; X64-NEXT: addq {{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
+; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload
+; X64-NEXT: adcq %rax, %rsi
+; X64-NEXT: adcq $0, %rcx
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: addq %rax, (%rsp) # 8-byte Folded Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: adcq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq %r15, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq %r11, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r13 # 8-byte Folded Reload
-; X64-NEXT: movq %r13, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: addq %rax, {{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: adcq %rax, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: adcq %r15, %r12
+; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rbp, %r11
+; X64-NEXT: movq %r11, (%rsp) # 8-byte Spill
+; X64-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax # 1-byte Folded Reload
+; X64-NEXT: adcq %rax, %r14
+; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq $0, %r10
+; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: adcq $0, %rcx
; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
-; X64-NEXT: movq 64(%r9), %r11
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq 64(%rcx), %r11
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r11
-; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload
-; X64-NEXT: movq %r10, %rax
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, %r13
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: addq %rsi, %rbx
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: movq 72(%r9), %rcx
+; X64-NEXT: movq 72(%rcx), %rsi
; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rcx, %rsi
-; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %rsi, %rcx
+; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rbx, %r8
-; X64-NEXT: adcq %rbp, %rcx
-; X64-NEXT: sbbq %rbp, %rbp
-; X64-NEXT: andl $1, %ebp
-; X64-NEXT: movq %r10, %rax
-; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rsi, %r10
+; X64-NEXT: adcq %rbp, %rsi
+; X64-NEXT: setb %bl
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %rcx
+; X64-NEXT: movq %rcx, %r10
; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %rcx, %rdi
-; X64-NEXT: adcq %rbp, %rsi
+; X64-NEXT: addq %rsi, %rdi
+; X64-NEXT: movzbl %bl, %eax
+; X64-NEXT: adcq %rax, %rcx
; X64-NEXT: movq %r11, %rax
-; X64-NEXT: xorl %ecx, %ecx
-; X64-NEXT: mulq %rcx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: mulq %rdx
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
@@ -5327,7 +5170,7 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
; X64-NEXT: adcq %r14, %r15
; X64-NEXT: addq %rdi, %r12
-; X64-NEXT: adcq %rsi, %r15
+; X64-NEXT: adcq %rcx, %r15
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: movq %r11, %rsi
@@ -5335,8 +5178,8 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
+; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rdi
@@ -5349,165 +5192,159 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: addq %rdi, %rax
; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: movq %rbp, %rdi
+; X64-NEXT: setb %sil
+; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movzbl %sil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: addq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: adcq %r13, %r14
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
; X64-NEXT: addq %rax, %rbx
; X64-NEXT: adcq %rdx, %r14
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: addq %r13, %rbx
; X64-NEXT: adcq %r8, %r14
; X64-NEXT: adcq $0, %r12
; X64-NEXT: adcq $0, %r15
-; X64-NEXT: movq %r9, %rbp
-; X64-NEXT: movq 80(%rbp), %r8
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq 80(%rbp), %rdi
; X64-NEXT: movq %r11, %rax
-; X64-NEXT: movq %r11, %r9
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rdx, %r10
-; X64-NEXT: movq %rax, %r11
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: movq %rax, %r13
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: addq %r10, %rcx
+; X64-NEXT: addq %r8, %rcx
; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq 88(%rbp), %r10
-; X64-NEXT: movq %r9, %rax
+; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rbp
-; X64-NEXT: movq %rax, %r9
-; X64-NEXT: addq %rcx, %r9
+; X64-NEXT: movq %rax, %r8
+; X64-NEXT: addq %rcx, %r8
; X64-NEXT: adcq %rsi, %rbp
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: setb %r11b
+; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r10
-; X64-NEXT: movq %rdx, %rdi
-; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: addq %rbp, %rcx
-; X64-NEXT: adcq %rsi, %rdi
-; X64-NEXT: movq %r8, %rax
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rax, %rsi
+; X64-NEXT: addq %rbp, %rsi
+; X64-NEXT: movzbl %r11b, %eax
+; X64-NEXT: adcq %rax, %rcx
+; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: mulq %rdx
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: addq %rax, %rsi
-; X64-NEXT: movq %r13, %rax
+; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, %r9
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: addq %r9, %rbp
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: adcq %rdx, %rax
-; X64-NEXT: movq %rdx, %r13
-; X64-NEXT: addq %rcx, %rsi
-; X64-NEXT: adcq %rdi, %rax
-; X64-NEXT: addq %rbx, %r11
-; X64-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r14, %r9
-; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq $0, %rsi
+; X64-NEXT: addq %rsi, %rbp
+; X64-NEXT: adcq %rcx, %rax
+; X64-NEXT: addq %rbx, %r13
+; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %r14, %r8
+; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq $0, %rbp
; X64-NEXT: adcq $0, %rax
-; X64-NEXT: addq %r12, %rsi
-; X64-NEXT: movq %rsi, %r14
+; X64-NEXT: addq %r12, %rbp
+; X64-NEXT: movq %rbp, %r8
; X64-NEXT: adcq %r15, %rax
-; X64-NEXT: movq %rax, %r9
-; X64-NEXT: movl $0, %r12d
-; X64-NEXT: adcq $0, %r12
-; X64-NEXT: sbbq %r11, %r11
-; X64-NEXT: andl $1, %r11d
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, %r15
+; X64-NEXT: movq %rax, %r11
+; X64-NEXT: setb %r14b
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %r15
+; X64-NEXT: movq %rax, %r12
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: addq %r15, %rbx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, %rdi
+; X64-NEXT: movq %rax, %rbx
; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
+; X64-NEXT: setb %sil
; X64-NEXT: movq %rbp, %rax
; X64-NEXT: mulq %r10
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movzbl %sil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
+; X64-NEXT: addq %r9, %rsi
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq %r13, %rcx
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: addq %r14, %r15
-; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r9, %rdi
-; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r12, %rsi
+; X64-NEXT: addq %r8, %r12
+; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %r11, %rbx
+; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movzbl %r14b, %eax
+; X64-NEXT: adcq %rax, %rsi
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r11, %rcx
+; X64-NEXT: adcq $0, %rcx
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: imulq %rax, %r10
-; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rax, %r9
+; X64-NEXT: movq %rax, %r14
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %r10, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
-; X64-NEXT: imulq %r10, %r8
-; X64-NEXT: addq %rdx, %r8
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: imulq %rbp, %rdi
+; X64-NEXT: addq %rdx, %rdi
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
-; X64-NEXT: imulq %rbx, %rsi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
-; X64-NEXT: mulq %rbp
-; X64-NEXT: movq %rax, %r11
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: imulq %r11, %rsi
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: mulq %rcx
+; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rsi, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
-; X64-NEXT: imulq %rbp, %rax
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: imulq %rcx, %rax
; X64-NEXT: addq %rdx, %rax
-; X64-NEXT: addq %r9, %r11
-; X64-NEXT: adcq %r8, %rax
-; X64-NEXT: movq %rax, %r14
-; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: mulq %rdi
+; X64-NEXT: addq %r8, %r9
+; X64-NEXT: adcq %rdi, %rax
+; X64-NEXT: movq %rax, %r8
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rcx, %rdi
+; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %rbx, %r8
-; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %r11, %rax
+; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rbp, %rax
-; X64-NEXT: movq %r10, %rbp
+; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %r15
; X64-NEXT: addq %rbx, %r15
; X64-NEXT: adcq %rsi, %rdi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
-; X64-NEXT: movq %r8, %rax
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %r12
-; X64-NEXT: movq %rax, %r9
-; X64-NEXT: addq %rdi, %r9
-; X64-NEXT: adcq %rcx, %r12
-; X64-NEXT: addq %r11, %r9
-; X64-NEXT: adcq %r14, %r12
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload
+; X64-NEXT: movq %rax, %r13
+; X64-NEXT: addq %rdi, %r13
+; X64-NEXT: movzbl %cl, %eax
+; X64-NEXT: adcq %rax, %r12
+; X64-NEXT: addq %r9, %r13
+; X64-NEXT: adcq %r8, %r12
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
; X64-NEXT: movq 120(%rdx), %rcx
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload
; X64-NEXT: imulq %r10, %rcx
@@ -5526,12 +5363,12 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: imulq %rbx, %rcx
; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rax, %r13
+; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rcx, %rdx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: imulq %rdi, %rax
; X64-NEXT: addq %rdx, %rax
-; X64-NEXT: addq %r11, %r13
+; X64-NEXT: addq %r11, %r9
; X64-NEXT: adcq %rsi, %rax
; X64-NEXT: movq %rax, %r11
; X64-NEXT: movq %rdi, %rax
@@ -5540,371 +5377,367 @@ define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %r10
-; X64-NEXT: movq %rdx, %rbp
-; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: addq %rcx, %rsi
-; X64-NEXT: adcq $0, %rbp
+; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, %rbp
+; X64-NEXT: addq %rcx, %rbp
+; X64-NEXT: adcq $0, %rsi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %rsi, %rdi
-; X64-NEXT: adcq %rbp, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
+; X64-NEXT: addq %rbp, %rdi
+; X64-NEXT: adcq %rsi, %rcx
+; X64-NEXT: setb %sil
; X64-NEXT: movq %rbx, %rax
; X64-NEXT: mulq %r8
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
-; X64-NEXT: addq %r13, %rax
+; X64-NEXT: movzbl %sil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: addq %r9, %rax
; X64-NEXT: adcq %r11, %rdx
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
; X64-NEXT: adcq %r15, %rdi
-; X64-NEXT: adcq %r9, %rax
+; X64-NEXT: adcq %r13, %rax
; X64-NEXT: adcq %r12, %rdx
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq 80(%rsi), %rcx
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload
-; X64-NEXT: mulq %r8
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq 80(%rsi), %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq 88(%rsi), %r10
-; X64-NEXT: movq %rsi, %r12
-; X64-NEXT: movq %r10, %rax
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: movq 88(%rsi), %rax
+; X64-NEXT: movq %rsi, %r9
+; X64-NEXT: movq %rax, %rsi
+; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: mulq %rcx
+; X64-NEXT: movq %rcx, %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %r9, %rbx
+; X64-NEXT: addq %r8, %rbx
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %rcx, %r9
-; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
; X64-NEXT: mulq %r15
-; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rbp, %rsi
-; X64-NEXT: sbbq %rdi, %rdi
-; X64-NEXT: andl $1, %edi
-; X64-NEXT: movq %r10, %rax
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rax, %r14
+; X64-NEXT: addq %rbx, %r14
+; X64-NEXT: adcq %rbp, %rcx
+; X64-NEXT: setb %r8b
+; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rsi, %rbx
-; X64-NEXT: adcq %rdi, %rbp
-; X64-NEXT: movq %r9, %rax
+; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: movzbl %r8b, %eax
+; X64-NEXT: adcq %rax, %rbp
+; X64-NEXT: movq %rdi, %rax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r9 # 8-byte Reload
-; X64-NEXT: addq %r9, %r10
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: adcq %r13, %r11
-; X64-NEXT: addq %rbx, %r10
-; X64-NEXT: adcq %rbp, %r11
-; X64-NEXT: movq %r12, %rcx
-; X64-NEXT: movq 64(%rcx), %rdi
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
-; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq 72(%rcx), %r14
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, %rsi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload
+; X64-NEXT: addq %r12, %rsi
+; X64-NEXT: movq %rdx, %r10
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r8 # 8-byte Reload
+; X64-NEXT: adcq %r8, %r10
+; X64-NEXT: addq %rbx, %rsi
+; X64-NEXT: adcq %rbp, %r10
+; X64-NEXT: movq %r9, %rdi
+; X64-NEXT: movq 64(%rdi), %r13
+; X64-NEXT: movq %r13, %rax
+; X64-NEXT: mulq %r11
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq 72(%rdi), %r9
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %r11
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rsi, %rbx
+; X64-NEXT: addq %rcx, %rbx
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %r13, %rax
; X64-NEXT: mulq %r15
-; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %rbp, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rbp, %rcx
+; X64-NEXT: setb %r11b
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rsi, %rbp
-; X64-NEXT: adcq %rcx, %rbx
-; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: addq %rcx, %rbp
+; X64-NEXT: movzbl %r11b, %eax
+; X64-NEXT: adcq %rax, %rbx
+; X64-NEXT: movq %r13, %rax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r8
+; X64-NEXT: movq %rdx, %r11
; X64-NEXT: movq %rax, %r15
-; X64-NEXT: addq %r15, %r9
-; X64-NEXT: movq %r13, %rax
-; X64-NEXT: adcq %r8, %rax
-; X64-NEXT: addq %rbp, %r9
-; X64-NEXT: adcq %rbx, %rax
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r9 # 8-byte Folded Reload
-; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
-; X64-NEXT: movq %rax, %r13
+; X64-NEXT: movq %r12, %rcx
+; X64-NEXT: addq %r15, %rcx
+; X64-NEXT: adcq %r11, %r8
+; X64-NEXT: addq %rbp, %rcx
+; X64-NEXT: adcq %rbx, %r8
+; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %r14, %r8
+; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq $0, %rsi
; X64-NEXT: adcq $0, %r10
-; X64-NEXT: adcq $0, %r11
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r13, %rax
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: movq %r14, %r12
-; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rsi, %r14
-; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: movq %rax, %r12
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdi, %r8
+; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
-; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %rbp, %rdi
-; X64-NEXT: adcq %rsi, %r9
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %r12, %rax
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rcx, %rbp
-; X64-NEXT: addq %r9, %rax
-; X64-NEXT: adcq %rsi, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload
-; X64-NEXT: addq %r12, %r15
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq $0, %rdi
+; X64-NEXT: movq %r13, %rax
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: mulq %rbx
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: addq %rbp, %rax
+; X64-NEXT: movq %rax, %rbp
+; X64-NEXT: adcq %rdi, %rcx
+; X64-NEXT: setb %dil
+; X64-NEXT: movq %r9, %rax
+; X64-NEXT: mulq %rbx
+; X64-NEXT: addq %rcx, %rax
+; X64-NEXT: movzbl %dil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: addq %r14, %r15
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
+; X64-NEXT: adcq %r13, %r11
; X64-NEXT: addq %rax, %r15
-; X64-NEXT: adcq %rdx, %r8
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r13, %rdi
-; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %rdx, %r11
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %r12 # 8-byte Folded Reload
+; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
+; X64-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq $0, %r15
-; X64-NEXT: adcq $0, %r8
-; X64-NEXT: addq %r10, %r15
-; X64-NEXT: adcq %r11, %r8
-; X64-NEXT: movl $0, %r9d
-; X64-NEXT: adcq $0, %r9
-; X64-NEXT: sbbq %r13, %r13
-; X64-NEXT: andl $1, %r13d
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r14, %rsi
-; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rdx, %r14
-; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rsi, %r11
-; X64-NEXT: movq %rdx, %rsi
+; X64-NEXT: adcq $0, %r11
+; X64-NEXT: addq %rsi, %r15
+; X64-NEXT: adcq %r10, %r11
+; X64-NEXT: setb %r10b
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq %r8, %rdi
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rax, %r9
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %rdi, %r12
+; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %r14, %rbx
-; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %rbp
+; X64-NEXT: addq %rcx, %rbx
+; X64-NEXT: adcq $0, %rdi
+; X64-NEXT: movq %rsi, %rax
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
+; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %rdi, %r14
-; X64-NEXT: mulq %rbp
+; X64-NEXT: adcq %rdi, %rcx
+; X64-NEXT: setb %r8b
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: mulq %rsi
+; X64-NEXT: movq %rsi, %rdi
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movzbl %r8b, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: addq %r12, %rsi
+; X64-NEXT: addq %r14, %rsi
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: adcq %r13, %rcx
; X64-NEXT: addq %rax, %rsi
; X64-NEXT: adcq %rdx, %rcx
-; X64-NEXT: addq %r15, %r10
-; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r8, %rbx
+; X64-NEXT: addq %r15, %r9
+; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: adcq %r11, %rbx
; X64-NEXT: movq %rbx, {{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r9, %rsi
+; X64-NEXT: movzbl %r10b, %eax
+; X64-NEXT: adcq %rax, %rsi
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r13, %rcx
+; X64-NEXT: adcq $0, %rcx
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq 96(%rsi), %rcx
-; X64-NEXT: imulq %rcx, %rbp
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: movq 96(%rbp), %rcx
+; X64-NEXT: imulq %rcx, %rdi
; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %r11, %rdi
-; X64-NEXT: mulq %rdi
+; X64-NEXT: movq %r12, %rsi
+; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rax, %r9
-; X64-NEXT: addq %rbp, %rdx
-; X64-NEXT: movq 104(%rsi), %r8
-; X64-NEXT: imulq %r8, %rdi
-; X64-NEXT: addq %rdx, %rdi
-; X64-NEXT: movq %rdi, %r10
-; X64-NEXT: movq 112(%rsi), %rax
-; X64-NEXT: movq %rsi, %rbp
-; X64-NEXT: movq %rax, %rdi
+; X64-NEXT: addq %rdi, %rdx
+; X64-NEXT: movq 104(%rbp), %r8
+; X64-NEXT: imulq %r8, %rsi
+; X64-NEXT: addq %rdx, %rsi
+; X64-NEXT: movq %rsi, %r11
+; X64-NEXT: movq 112(%rbp), %rax
+; X64-NEXT: movq %rbp, %rdi
+; X64-NEXT: movq %rax, %rsi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: imulq %rbp, %rsi
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
+; X64-NEXT: mulq %rbx
+; X64-NEXT: movq %rax, %r10
+; X64-NEXT: addq %rsi, %rdx
+; X64-NEXT: movq 120(%rdi), %rdi
; X64-NEXT: imulq %rbx, %rdi
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rax, %r11
-; X64-NEXT: addq %rdi, %rdx
-; X64-NEXT: movq 120(%rbp), %rdi
-; X64-NEXT: imulq %rsi, %rdi
; X64-NEXT: addq %rdx, %rdi
-; X64-NEXT: addq %r9, %r11
-; X64-NEXT: adcq %r10, %rdi
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %rsi, %r10
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: addq %r9, %r10
+; X64-NEXT: adcq %r11, %rdi
; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: movq %rbx, %r9
+; X64-NEXT: movq %rbx, %rsi
+; X64-NEXT: mulq %rcx
+; X64-NEXT: movq %rdx, %rbx
+; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %rbp, %r9
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, %rbx
-; X64-NEXT: addq %rsi, %rbx
+; X64-NEXT: movq %rax, %rbp
+; X64-NEXT: addq %rbx, %rbp
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %r10, %rax
+; X64-NEXT: movq %rsi, %rax
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r15
-; X64-NEXT: addq %rbx, %r15
+; X64-NEXT: movq %rax, %r12
+; X64-NEXT: addq %rbp, %r12
; X64-NEXT: adcq %rcx, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
+; X64-NEXT: setb %cl
; X64-NEXT: movq %r9, %rax
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %r8
-; X64-NEXT: addq %rsi, %r8
-; X64-NEXT: adcq %rcx, %rbx
-; X64-NEXT: addq %r11, %r8
+; X64-NEXT: movq %rax, %rbp
+; X64-NEXT: addq %rsi, %rbp
+; X64-NEXT: movzbl %cl, %eax
+; X64-NEXT: adcq %rax, %rbx
+; X64-NEXT: addq %r10, %rbp
; X64-NEXT: adcq %rdi, %rbx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: imulq %rax, %rsi
-; X64-NEXT: movq %rax, %r9
+; X64-NEXT: movq %rax, %r13
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rax, %r11
+; X64-NEXT: movq %rax, %r8
; X64-NEXT: addq %rsi, %rdx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %r12 # 8-byte Reload
-; X64-NEXT: imulq %r12, %rcx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
+; X64-NEXT: imulq %r11, %rcx
; X64-NEXT: addq %rdx, %rcx
-; X64-NEXT: movq %rcx, %rbp
+; X64-NEXT: movq %rcx, %r9
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: imulq %rsi, %rcx
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
-; X64-NEXT: mulq %rdi
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r15 # 8-byte Reload
+; X64-NEXT: imulq %r15, %rcx
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %r14 # 8-byte Reload
+; X64-NEXT: mulq %r14
; X64-NEXT: movq %rax, %r10
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq %r14, %r13
-; X64-NEXT: imulq %rdi, %r13
-; X64-NEXT: addq %rdx, %r13
-; X64-NEXT: addq %r11, %r10
-; X64-NEXT: adcq %rbp, %r13
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: movq %rdi, %r11
-; X64-NEXT: mulq %r9
-; X64-NEXT: movq %rdx, %rbp
-; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: movq %rsi, %rax
-; X64-NEXT: movq %rsi, %r14
-; X64-NEXT: mulq %r9
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: imulq %r14, %rax
+; X64-NEXT: addq %rdx, %rax
+; X64-NEXT: addq %r8, %r10
+; X64-NEXT: adcq %r9, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r14, %rax
+; X64-NEXT: mulq %r13
+; X64-NEXT: movq %rdx, %rdi
+; X64-NEXT: movq %rax, %r8
+; X64-NEXT: movq %r15, %rax
+; X64-NEXT: mulq %r13
; X64-NEXT: movq %rdx, %r9
; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: addq %rbp, %rcx
+; X64-NEXT: addq %rdi, %rcx
; X64-NEXT: adcq $0, %r9
-; X64-NEXT: movq %r11, %rax
-; X64-NEXT: movq %r12, %rbp
-; X64-NEXT: mulq %rbp
-; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r11
-; X64-NEXT: addq %rcx, %r11
-; X64-NEXT: adcq %r9, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %rbp
-; X64-NEXT: addq %rsi, %rax
+; X64-NEXT: mulq %r11
+; X64-NEXT: movq %rdx, %rdi
+; X64-NEXT: movq %rax, %rsi
+; X64-NEXT: addq %rcx, %rsi
+; X64-NEXT: adcq %r9, %rdi
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r15, %rax
+; X64-NEXT: mulq %r11
+; X64-NEXT: addq %rdi, %rax
+; X64-NEXT: movzbl %cl, %ecx
; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: addq %r10, %rax
-; X64-NEXT: adcq %r13, %rdx
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: adcq %r15, %r11
-; X64-NEXT: adcq %r8, %rax
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq %r12, %rsi
+; X64-NEXT: adcq %rbp, %rax
; X64-NEXT: adcq %rbx, %rdx
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
-; X64-NEXT: addq (%rsp), %rcx # 8-byte Folded Reload
-; X64-NEXT: movq %rcx, %r8
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: movq %rsi, %r9
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rbx # 8-byte Folded Reload
-; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
+; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
+; X64-NEXT: addq {{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
+; X64-NEXT: movq %rcx, %r9
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
-; X64-NEXT: adcq {{[0-9]+}}(%rsp), %r11 # 8-byte Folded Reload
+; X64-NEXT: movq %rdi, %r10
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload
+; X64-NEXT: adcq (%rsp), %rbx # 8-byte Folded Reload
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r8 # 8-byte Folded Reload
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload
; X64-NEXT: adcq {{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, (%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, 8(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, 16(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, 24(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, 32(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, 40(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, 48(%rcx)
-; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
-; X64-NEXT: movq %rsi, 56(%rcx)
-; X64-NEXT: movq %r8, 64(%rcx)
-; X64-NEXT: movq %r9, 72(%rcx)
-; X64-NEXT: movq %rbx, 80(%rcx)
-; X64-NEXT: movq %rbp, 88(%rcx)
-; X64-NEXT: movq %rdi, 96(%rcx)
-; X64-NEXT: movq %r11, 104(%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, (%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, 8(%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, 16(%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, 24(%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, 32(%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, 40(%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, 48(%rcx)
+; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
+; X64-NEXT: movq %rdi, 56(%rcx)
+; X64-NEXT: movq %r9, 64(%rcx)
+; X64-NEXT: movq %r10, 72(%rcx)
+; X64-NEXT: movq %rbp, 80(%rcx)
+; X64-NEXT: movq %rbx, 88(%rcx)
+; X64-NEXT: movq %r8, 96(%rcx)
+; X64-NEXT: movq %rsi, 104(%rcx)
; X64-NEXT: movq %rax, 112(%rcx)
; X64-NEXT: movq %rdx, 120(%rcx)
; X64-NEXT: addq $352, %rsp # imm = 0x160
diff --git a/test/CodeGen/X86/mul-i256.ll b/test/CodeGen/X86/mul-i256.ll
index 3414847186528..acd86e949894e 100644
--- a/test/CodeGen/X86/mul-i256.ll
+++ b/test/CodeGen/X86/mul-i256.ll
@@ -3,7 +3,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"

define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32-LABEL: test:
@@ -138,18 +137,17 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32-NEXT: adcl $0, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: xorl %edx, %edx
; X32-NEXT: addl %ecx, %edi
; X32-NEXT: adcl %eax, %ebx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -205,76 +203,70 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X64-NEXT: pushq %r14
; X64-NEXT: .Lcfi1:
; X64-NEXT: .cfi_def_cfa_offset 24
-; X64-NEXT: pushq %r12
+; X64-NEXT: pushq %rbx
; X64-NEXT: .Lcfi2:
; X64-NEXT: .cfi_def_cfa_offset 32
-; X64-NEXT: pushq %rbx
; X64-NEXT: .Lcfi3:
-; X64-NEXT: .cfi_def_cfa_offset 40
+; X64-NEXT: .cfi_offset %rbx, -32
; X64-NEXT: .Lcfi4:
-; X64-NEXT: .cfi_offset %rbx, -40
-; X64-NEXT: .Lcfi5:
-; X64-NEXT: .cfi_offset %r12, -32
-; X64-NEXT: .Lcfi6:
; X64-NEXT: .cfi_offset %r14, -24
-; X64-NEXT: .Lcfi7:
+; X64-NEXT: .Lcfi5:
; X64-NEXT: .cfi_offset %r15, -16
; X64-NEXT: movq %rdx, %r9
-; X64-NEXT: movq (%rdi), %r14
+; X64-NEXT: movq (%rdi), %r11
; X64-NEXT: movq 8(%rdi), %r8
-; X64-NEXT: movq 16(%rdi), %rcx
-; X64-NEXT: movq 16(%rsi), %rbx
-; X64-NEXT: movq (%rsi), %r12
+; X64-NEXT: movq 16(%rdi), %rbx
+; X64-NEXT: movq 16(%rsi), %r10
+; X64-NEXT: movq (%rsi), %rcx
; X64-NEXT: movq 8(%rsi), %r15
; X64-NEXT: movq 24(%rdi), %rdi
-; X64-NEXT: imulq %r12, %rdi
-; X64-NEXT: movq %r12, %rax
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rax, %r10
+; X64-NEXT: imulq %rcx, %rdi
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: mulq %rbx
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: addq %rdi, %rdx
-; X64-NEXT: imulq %r15, %rcx
-; X64-NEXT: addq %rdx, %rcx
-; X64-NEXT: movq %rbx, %rdi
+; X64-NEXT: imulq %r15, %rbx
+; X64-NEXT: addq %rdx, %rbx
+; X64-NEXT: movq %r10, %rdi
; X64-NEXT: imulq %r8, %rdi
-; X64-NEXT: movq %rbx, %rax
-; X64-NEXT: mulq %r14
-; X64-NEXT: movq %rax, %r11
+; X64-NEXT: movq %r10, %rax
+; X64-NEXT: mulq %r11
+; X64-NEXT: movq %rax, %r10
; X64-NEXT: addq %rdi, %rdx
-; X64-NEXT: movq 24(%rsi), %rbx
-; X64-NEXT: imulq %r14, %rbx
-; X64-NEXT: addq %rdx, %rbx
-; X64-NEXT: addq %r10, %r11
-; X64-NEXT: adcq %rcx, %rbx
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: mulq %r12
+; X64-NEXT: movq 24(%rsi), %rdi
+; X64-NEXT: imulq %r11, %rdi
+; X64-NEXT: addq %rdx, %rdi
+; X64-NEXT: addq %r14, %r10
+; X64-NEXT: adcq %rbx, %rdi
+; X64-NEXT: movq %r11, %rax
+; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r10
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: movq %r8, %rax
-; X64-NEXT: mulq %r12
+; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %rcx
-; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %rsi, %rdi
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %rsi, %rbx
; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: movq %rdx, %rsi
-; X64-NEXT: movq %rax, %r14
-; X64-NEXT: addq %rdi, %r14
+; X64-NEXT: movq %rax, %r11
+; X64-NEXT: addq %rbx, %r11
; X64-NEXT: adcq %rcx, %rsi
-; X64-NEXT: sbbq %rcx, %rcx
-; X64-NEXT: andl $1, %ecx
+; X64-NEXT: setb %al
+; X64-NEXT: movzbl %al, %ecx
; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r15
; X64-NEXT: addq %rsi, %rax
; X64-NEXT: adcq %rcx, %rdx
-; X64-NEXT: addq %r11, %rax
-; X64-NEXT: adcq %rbx, %rdx
-; X64-NEXT: movq %r10, (%r9)
-; X64-NEXT: movq %r14, 8(%r9)
+; X64-NEXT: addq %r10, %rax
+; X64-NEXT: adcq %rdi, %rdx
+; X64-NEXT: movq %r14, (%r9)
+; X64-NEXT: movq %r11, 8(%r9)
; X64-NEXT: movq %rax, 16(%r9)
; X64-NEXT: movq %rdx, 24(%r9)
; X64-NEXT: popq %rbx
-; X64-NEXT: popq %r12
; X64-NEXT: popq %r14
; X64-NEXT: popq %r15
; X64-NEXT: retq
@@ -286,4 +278,4 @@ entry:
ret void
}

-attributes #0 = { norecurse nounwind uwtable "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" }
+attributes #0 = { norecurse nounwind uwtable }
diff --git a/test/CodeGen/X86/mul-i512.ll b/test/CodeGen/X86/mul-i512.ll
index 14fbeae527960..3da17b69ffb52 100644
--- a/test/CodeGen/X86/mul-i512.ll
+++ b/test/CodeGen/X86/mul-i512.ll
@@ -74,14 +74,13 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: movl 20(%eax), %edi
; X32-NEXT: movl 24(%eax), %ebx
; X32-NEXT: movl 28(%eax), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %eax
; X32-NEXT: pushl %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
@@ -107,6 +106,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -123,8 +123,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: pushl %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
@@ -133,10 +132,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: pushl %esi
-; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
@@ -145,25 +145,24 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: pushl %esi
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: pushl %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: pushl %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
@@ -172,7 +171,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
-; X32-NEXT: pushl %esi
+; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -183,14 +182,14 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: pushl %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: pushl %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
@@ -198,8 +197,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -213,7 +212,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
@@ -223,11 +222,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
@@ -240,20 +239,20 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: pushl %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: pushl %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
@@ -262,8 +261,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -274,21 +273,21 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %esi
; X32-NEXT: pushl %edi
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: pushl %edi
; X32-NEXT: pushl %esi
-; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: pushl %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: pushl %ebx
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
@@ -298,11 +297,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pushl %esi
-; X32-NEXT: pushl %ebx
; X32-NEXT: pushl %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -313,8 +312,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %edi
; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -323,7 +322,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl (%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -349,10 +348,10 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: pushl %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: pushl %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: pushl %edi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -365,18 +364,18 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %esi
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
+; X32-NEXT: pushl %esi
; X32-NEXT: pushl %eax
; X32-NEXT: calll __multi3
; X32-NEXT: addl $32, %esp
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
-; X32-NEXT: pushl %edi
; X32-NEXT: pushl %ebx
+; X32-NEXT: pushl %edi
; X32-NEXT: pushl $0
; X32-NEXT: pushl $0
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
@@ -494,134 +493,142 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl %esi, %eax
; X32-NEXT: adcl %edx, %ecx
-; X32-NEXT: movl $0, %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: sbbl %edx, %edx
-; X32-NEXT: andl $1, %edx
+; X32-NEXT: setb %dl
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movzbl %dl, %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: addl %edx, %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %edx, %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %edx, %ebx
+; X32-NEXT: movl %ebx, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %esi, %edx
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
+; X32-NEXT: adcl %esi, %ebx
+; X32-NEXT: movl %ebx, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %ecx, %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: addl %edi, %ebx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %eax, %ebx
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl %ecx, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: adcl %edx, %eax
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: adcl %eax, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %ebx, %eax
+; X32-NEXT: addl %edx, %edi
+; X32-NEXT: movl %ecx, %edx
+; X32-NEXT: adcl %esi, %edx
+; X32-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: adcl $0, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %ecx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ebx, %ebx
-; X32-NEXT: andl $1, %ebx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
+; X32-NEXT: addl %eax, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
-; X32-NEXT: addl %ecx, %eax
+; X32-NEXT: adcl %ecx, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
@@ -629,140 +636,125 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl %edi, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: adcl %ebx, %edi
+; X32-NEXT: addl %edx, %ebx
+; X32-NEXT: adcl %esi, %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: movl (%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl %edx, %eax
+; X32-NEXT: adcl $0, %ebx
+; X32-NEXT: movl %esi, %eax
; X32-NEXT: adcl $0, %eax
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: movl %esi, %edx
; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
+; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
-; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: andl $1, %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: setb (%esp) # 1-byte Folded Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: addl %edx, %eax
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl $0, %edx
-; X32-NEXT: adcl $0, %edx
-; X32-NEXT: sbbl %esi, %esi
-; X32-NEXT: andl $1, %esi
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: addl %eax, %esi
+; X32-NEXT: adcl %ecx, %edx
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movzbl %al, %ebx
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
-; X32-NEXT: addl %eax, %edi
-; X32-NEXT: adcl %ecx, %ebx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
-; X32-NEXT: adcl %esi, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
+; X32-NEXT: adcl %ecx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: addl %esi, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: adcl %edx, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx # 4-byte Reload
-; X32-NEXT: adcl %edx, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl %ebx, %edx
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
-; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
+; X32-NEXT: adcl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %edi
+; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movl %eax, %esi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: adcl $0, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT: adcl $0, %ebx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl $0, %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl $0, %esi
-; X32-NEXT: addl %edi, %edx
-; X32-NEXT: adcl %ebx, %esi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
+; X32-NEXT: addl %eax, %edx
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx
@@ -777,7 +769,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
@@ -789,25 +781,24 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: adcl $0, %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl $0, %esi
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl $0, %edi
-; X32-NEXT: addl %eax, %esi
-; X32-NEXT: adcl %ecx, %edi
-; X32-NEXT: movl $0, %eax
-; X32-NEXT: adcl $0, %eax
-; X32-NEXT: sbbl %ecx, %ecx
-; X32-NEXT: andl $1, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: adcl $0, %esi
+; X32-NEXT: addl %eax, %edi
+; X32-NEXT: adcl %ecx, %esi
+; X32-NEXT: setb %al
+; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
+; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
+; X32-NEXT: movzbl %al, %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %eax
+; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
@@ -828,8 +819,8 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: adcl %edi, %ecx
+; X32-NEXT: addl %edi, %ebx
+; X32-NEXT: adcl %esi, %ecx
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
@@ -838,7 +829,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
+; X32-NEXT: movl (%esp), %edi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
@@ -853,7 +844,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %ebx # 4-byte Folded Reload
; X32-NEXT: adcl {{[0-9]+}}(%esp), %edi # 4-byte Folded Reload
-; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
+; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
; X32-NEXT: adcl {{[0-9]+}}(%esp), %esi # 4-byte Folded Reload
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
@@ -864,36 +855,36 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X32-NEXT: adcl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl 16(%ebp), %edi
-; X32-NEXT: movl %esi, 4(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
; X32-NEXT: movl 16(%ebp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, (%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 8(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 12(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 16(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 20(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 24(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 28(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 32(%esi)
-; X32-NEXT: movl %ebx, 36(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 40(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 44(%esi)
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
-; X32-NEXT: movl %edi, 48(%esi)
-; X32-NEXT: movl %ecx, 52(%esi)
-; X32-NEXT: movl %edx, 56(%esi)
-; X32-NEXT: movl %eax, 60(%esi)
+; X32-NEXT: movl %edi, 4(%esi)
+; X32-NEXT: movl 16(%ebp), %edi
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, (%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 8(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 12(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 16(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 20(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 24(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 28(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 32(%edi)
+; X32-NEXT: movl %ebx, 36(%edi)
+; X32-NEXT: movl (%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 40(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 44(%edi)
+; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; X32-NEXT: movl %esi, 48(%edi)
+; X32-NEXT: movl %ecx, 52(%edi)
+; X32-NEXT: movl %edx, 56(%edi)
+; X32-NEXT: movl %eax, 60(%edi)
; X32-NEXT: leal -12(%ebp), %esp
; X32-NEXT: popl %esi
; X32-NEXT: popl %edi
@@ -912,35 +903,36 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: pushq %rax
; X64-NEXT: movq %rdx, (%rsp) # 8-byte Spill
; X64-NEXT: movq 24(%rdi), %r11
-; X64-NEXT: movq 16(%rdi), %r14
+; X64-NEXT: movq 16(%rdi), %r15
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq (%rsi), %rdx
; X64-NEXT: movq 8(%rsi), %rbp
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %r15, %rax
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: mulq %rsi
-; X64-NEXT: movq %rdx, %r8
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, %r9
+; X64-NEXT: movq %rax, %r8
; X64-NEXT: movq %r11, %rax
; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rsi, %r10
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: addq %r8, %rsi
+; X64-NEXT: addq %r9, %rsi
; X64-NEXT: adcq $0, %rbx
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %r15, %rax
+; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rbp
; X64-NEXT: movq %rdx, %rcx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rsi, %r9
; X64-NEXT: adcq %rbx, %rcx
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
+; X64-NEXT: setb %al
+; X64-NEXT: movzbl %al, %ebx
; X64-NEXT: movq %r11, %rax
; X64-NEXT: mulq %rbp
-; X64-NEXT: movq %rbp, %r8
+; X64-NEXT: movq %rbp, %r14
+; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rcx, %rbp
@@ -952,46 +944,44 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: mulq %rcx
; X64-NEXT: movq %rdx, %r13
; X64-NEXT: movq %rax, %r10
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq %r15, %rax
; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rdx, %r12
-; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rax, %r15
; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: addq %r10, %r15
-; X64-NEXT: adcq %r13, %r12
+; X64-NEXT: adcq %r13, %rdx
; X64-NEXT: addq %rbp, %r15
-; X64-NEXT: adcq %rsi, %r12
+; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movq %rdx, %r12
; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq (%rdi), %r14
-; X64-NEXT: movq %r14, %rax
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq 8(%rdi), %rcx
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq 8(%rdi), %rdi
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rsi
; X64-NEXT: addq %r11, %rsi
; X64-NEXT: adcq $0, %rbp
-; X64-NEXT: movq %r14, %rax
-; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: mulq %r8
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: addq %rsi, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rbx
-; X64-NEXT: sbbq %rdi, %rdi
-; X64-NEXT: andl $1, %edi
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: mulq %r8
+; X64-NEXT: setb %r11b
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: mulq %r14
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rbx, %rbp
-; X64-NEXT: adcq %rdi, %rsi
-; X64-NEXT: movq %r14, %rcx
+; X64-NEXT: movzbl %r11b, %eax
+; X64-NEXT: adcq %rax, %rsi
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: mulq %rdx
@@ -1001,10 +991,11 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: adcq %r14, %r13
; X64-NEXT: addq %rbp, %r10
; X64-NEXT: adcq %rsi, %r13
-; X64-NEXT: addq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload
+; X64-NEXT: addq %r8, %r10
; X64-NEXT: adcq %r9, %r13
; X64-NEXT: adcq $0, %r15
; X64-NEXT: adcq $0, %r12
+; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: movq 16(%rsi), %r8
; X64-NEXT: movq %rcx, %rax
@@ -1012,7 +1003,7 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rdi
-; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
+; X64-NEXT: movq %rax, %r12
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r8
@@ -1027,14 +1018,14 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %rbp, %rsi
-; X64-NEXT: sbbq %rbp, %rbp
-; X64-NEXT: andl $1, %ebp
+; X64-NEXT: setb %bpl
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: movq %rdx, %rbx
; X64-NEXT: movq %rax, %r9
; X64-NEXT: addq %rsi, %r9
-; X64-NEXT: adcq %rbp, %rbx
+; X64-NEXT: movzbl %bpl, %eax
+; X64-NEXT: adcq %rax, %rbx
; X64-NEXT: movq %r8, %rax
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: mulq %rcx
@@ -1044,16 +1035,14 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: adcq %rdx, %r14
; X64-NEXT: addq %r9, %r11
; X64-NEXT: adcq %rbx, %r14
-; X64-NEXT: addq %r10, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
+; X64-NEXT: addq %r10, %r12
+; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %r13, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill
; X64-NEXT: adcq $0, %r11
; X64-NEXT: adcq $0, %r14
; X64-NEXT: addq %r15, %r11
-; X64-NEXT: adcq %r12, %r14
-; X64-NEXT: adcq $0, %rcx
-; X64-NEXT: movq %rcx, %r13
-; X64-NEXT: sbbq %r9, %r9
-; X64-NEXT: andl $1, %r9d
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload
+; X64-NEXT: setb %r9b
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq %rcx, %rax
; X64-NEXT: mulq %r8
@@ -1072,12 +1061,12 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: addq %rbx, %rax
; X64-NEXT: movq %rax, %rbx
; X64-NEXT: adcq %rsi, %rcx
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
+; X64-NEXT: setb %sil
; X64-NEXT: movq %r10, %rax
; X64-NEXT: mulq %rdi
; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rsi, %rdx
+; X64-NEXT: movzbl %sil, %ecx
+; X64-NEXT: adcq %rcx, %rdx
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
; X64-NEXT: addq %rbp, %rsi
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
@@ -1088,9 +1077,10 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq %r12, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: adcq %r14, %rbx
; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r13, %rsi
+; X64-NEXT: movzbl %r9b, %eax
+; X64-NEXT: adcq %rax, %rsi
; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: adcq %r9, %rcx
+; X64-NEXT: adcq $0, %rcx
; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
; X64-NEXT: movq 32(%rcx), %rsi
@@ -1105,42 +1095,44 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: movq 48(%rcx), %rax
; X64-NEXT: movq %rcx, %rbx
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
-; X64-NEXT: imulq %r11, %rdi
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
-; X64-NEXT: mulq %rcx
-; X64-NEXT: movq %rax, %r12
+; X64-NEXT: imulq %rcx, %rdi
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
+; X64-NEXT: mulq %rbp
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: addq %rdi, %rdx
; X64-NEXT: movq 56(%rbx), %rbx
-; X64-NEXT: imulq %rcx, %rbx
+; X64-NEXT: imulq %rbp, %rbx
; X64-NEXT: addq %rdx, %rbx
-; X64-NEXT: addq %r10, %r12
+; X64-NEXT: addq %r10, %r14
; X64-NEXT: adcq %r8, %rbx
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rbp, %rax
+; X64-NEXT: movq %rbp, %r10
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rdi
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
-; X64-NEXT: movq %r11, %rax
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %rcx, %r8
; X64-NEXT: mulq %rsi
; X64-NEXT: movq %rdx, %rsi
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: addq %rdi, %rbp
; X64-NEXT: adcq $0, %rsi
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movq %r10, %rax
; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %rdi
-; X64-NEXT: movq %rax, %r15
-; X64-NEXT: addq %rbp, %r15
+; X64-NEXT: movq %rax, %r13
+; X64-NEXT: addq %rbp, %r13
; X64-NEXT: adcq %rsi, %rdi
-; X64-NEXT: sbbq %rsi, %rsi
-; X64-NEXT: andl $1, %esi
-; X64-NEXT: movq %r11, %rax
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r9
; X64-NEXT: movq %rdx, %r11
-; X64-NEXT: movq %rax, %r14
-; X64-NEXT: addq %rdi, %r14
-; X64-NEXT: adcq %rsi, %r11
-; X64-NEXT: addq %r12, %r14
+; X64-NEXT: movq %rax, %r9
+; X64-NEXT: addq %rdi, %r9
+; X64-NEXT: movzbl %cl, %eax
+; X64-NEXT: adcq %rax, %r11
+; X64-NEXT: addq %r14, %r9
; X64-NEXT: adcq %rbx, %r11
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload
; X64-NEXT: movq 56(%rdx), %rcx
@@ -1152,49 +1144,50 @@ define void @test_512(i512* %a, i512* %b, i512* %out) nounwind {
; X64-NEXT: mulq %rbx
; X64-NEXT: movq %rax, %rsi
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
-; X64-NEXT: imulq %r8, %rbx
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
+; X64-NEXT: imulq %r15, %rbx
; X64-NEXT: addq %rdx, %rbx
; X64-NEXT: movq 32(%rbp), %rdi
-; X64-NEXT: movq 40(%rbp), %r12
+; X64-NEXT: movq 40(%rbp), %r8
; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
; X64-NEXT: movq %rax, %rcx
-; X64-NEXT: imulq %r12, %rcx
+; X64-NEXT: imulq %r8, %rcx
; X64-NEXT: mulq %rdi
-; X64-NEXT: movq %rax, %r9
+; X64-NEXT: movq %rax, %r14
; X64-NEXT: addq %rcx, %rdx
-; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r13 # 8-byte Reload
-; X64-NEXT: imulq %rdi, %r13
-; X64-NEXT: addq %rdx, %r13
-; X64-NEXT: addq %rsi, %r9
-; X64-NEXT: adcq %rbx, %r13
+; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
+; X64-NEXT: imulq %rdi, %rax
+; X64-NEXT: addq %rdx, %rax
+; X64-NEXT: addq %rsi, %r14
+; X64-NEXT: adcq %rbx, %rax
+; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: mulq %r10
-; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rdx, %r12
; X64-NEXT: movq %rax, %rsi
-; X64-NEXT: movq %r12, %rax
+; X64-NEXT: movq %r8, %rax
; X64-NEXT: mulq %r10
-; X64-NEXT: movq %rdx, %rbx
-; X64-NEXT: movq %rax, %rbp
-; X64-NEXT: addq %rcx, %rbp
-; X64-NEXT: adcq $0, %rbx
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: mulq %r8
; X64-NEXT: movq %rdx, %rcx
+; X64-NEXT: movq %rax, %rbx
+; X64-NEXT: addq %r12, %rbx
+; X64-NEXT: adcq $0, %rcx
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: mulq %r15
+; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rax, %rdi
-; X64-NEXT: addq %rbp, %rdi
-; X64-NEXT: adcq %rbx, %rcx
-; X64-NEXT: sbbq %rbx, %rbx
-; X64-NEXT: andl $1, %ebx
-; X64-NEXT: movq %r12, %rax
-; X64-NEXT: mulq %r8
-; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: adcq %rbx, %rdx
-; X64-NEXT: addq %r9, %rax
-; X64-NEXT: adcq %r13, %rdx
+; X64-NEXT: addq %rbx, %rdi
+; X64-NEXT: adcq %rcx, %rbp
+; X64-NEXT: setb %cl
+; X64-NEXT: movq %r8, %rax
+; X64-NEXT: mulq %r15
+; X64-NEXT: addq %rbp, %rax
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: adcq %rcx, %rdx
+; X64-NEXT: addq %r14, %rax
+; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
-; X64-NEXT: adcq %r15, %rdi
-; X64-NEXT: adcq %r14, %rax
+; X64-NEXT: adcq %r13, %rdi
+; X64-NEXT: adcq %r9, %rax
; X64-NEXT: adcq %r11, %rdx
; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rsi # 8-byte Folded Reload
; X64-NEXT: adcq -{{[0-9]+}}(%rsp), %rdi # 8-byte Folded Reload
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index 0bda41a30c697..d26cf02dd9424 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -746,9 +746,9 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE2-LABEL: interleave_24i8_in:
; SSE2: # BB#0:
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
@@ -791,17 +791,17 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; SSE42: # BB#0:
; SSE42-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE42-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE42-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE42-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE42-NEXT: movdqa %xmm0, %xmm2
-; SSE42-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,8],zero,xmm2[1,9],zero,xmm2[2,10],zero,xmm2[3,11],zero,xmm2[4,12],zero,xmm2[5]
-; SSE42-NEXT: movdqa %xmm1, %xmm3
+; SSE42-NEXT: movdqa %xmm0, %xmm1
+; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,8],zero,xmm1[1,9],zero,xmm1[2,10],zero,xmm1[3,11],zero,xmm1[4,12],zero,xmm1[5]
+; SSE42-NEXT: movdqa %xmm2, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm3 = zero,zero,xmm3[0],zero,zero,xmm3[1],zero,zero,xmm3[2],zero,zero,xmm3[3],zero,zero,xmm3[4],zero
-; SSE42-NEXT: por %xmm2, %xmm3
+; SSE42-NEXT: por %xmm1, %xmm3
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
-; SSE42-NEXT: pshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
-; SSE42-NEXT: por %xmm0, %xmm1
-; SSE42-NEXT: movq %xmm1, 16(%rdi)
+; SSE42-NEXT: pshufb {{.*#+}} xmm2 = zero,xmm2[5],zero,zero,xmm2[6],zero,zero,xmm2[7,u,u,u,u,u,u,u,u]
+; SSE42-NEXT: por %xmm0, %xmm2
+; SSE42-NEXT: movq %xmm2, 16(%rdi)
; SSE42-NEXT: movdqu %xmm3, (%rdi)
; SSE42-NEXT: retq
;
@@ -809,16 +809,16 @@ define void @interleave_24i8_in(<24 x i8>* %p, <8 x i8>* %q1, <8 x i8>* %q2, <8
; AVX: # BB#0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
-; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm1[0],zero,zero,xmm1[1],zero,zero,xmm1[2],zero,zero,xmm1[3],zero,zero,xmm1[4],zero
-; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,8],zero,xmm0[1,9],zero,xmm0[2,10],zero,xmm0[3,11],zero,xmm0[4,12],zero,xmm0[5]
+; AVX-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm2[0],zero,zero,xmm2[1],zero,zero,xmm2[2],zero,zero,xmm2[3],zero,zero,xmm2[4],zero
+; AVX-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[13],zero,xmm0[6,14],zero,xmm0[7,15],zero,xmm0[u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[5],zero,zero,xmm1[6],zero,zero,xmm1[7,u,u,u,u,u,u,u,u]
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[5],zero,zero,xmm2[6],zero,zero,xmm2[7,u,u,u,u,u,u,u,u]
+; AVX-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX-NEXT: vmovq %xmm0, 16(%rdi)
-; AVX-NEXT: vmovdqu %xmm2, (%rdi)
+; AVX-NEXT: vmovdqu %xmm1, (%rdi)
; AVX-NEXT: retq
%s1 = load <8 x i8>, <8 x i8>* %q1, align 4
%s2 = load <8 x i8>, <8 x i8>* %q2, align 4
diff --git a/test/CodeGen/X86/overflow.ll b/test/CodeGen/X86/overflow.ll
index ff25b5de49333..00dadc4a80f6f 100644
--- a/test/CodeGen/X86/overflow.ll
+++ b/test/CodeGen/X86/overflow.ll
@@ -27,16 +27,14 @@ define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind {
; X32-NEXT: addl $32, %esp
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %edi
-; X32-NEXT: xorl %ecx, %ecx
; X32-NEXT: addl {{[0-9]+}}(%esp), %edi
; X32-NEXT: adcl $0, %eax
-; X32-NEXT: adcl $0, %ecx
-; X32-NEXT: sbbl %edx, %edx
-; X32-NEXT: andl $1, %edx
+; X32-NEXT: setb %cl
+; X32-NEXT: movzbl %cl, %ecx
; X32-NEXT: movl %edi, (%esi)
; X32-NEXT: movl %eax, 4(%esi)
; X32-NEXT: movl %ecx, 8(%esi)
-; X32-NEXT: movl %edx, 12(%esi)
+; X32-NEXT: movl $0, 12(%esi)
; X32-NEXT: movl %esi, %eax
; X32-NEXT: leal -8(%ebp), %esp
; X32-NEXT: popl %esi
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 50a661fcca114..88cb7a6d58258 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -1152,9 +1152,9 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT: pmuludq %xmm0, %xmm1
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
; SSE2-NEXT: movaps %xmm2, %xmm0
; SSE2-NEXT: retq
@@ -1166,9 +1166,9 @@ define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pmuludq %xmm2, %xmm4
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pmuludq %xmm3, %xmm0
+; SSE41-NEXT: pmuludq %xmm2, %xmm4
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
; SSE41-NEXT: retq
;
@@ -1312,17 +1312,17 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm7
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; SSE2-NEXT: pmuludq %xmm7, %xmm4
+; SSE2-NEXT: movdqa %xmm2, %xmm8
+; SSE2-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm6[0],xmm8[1],xmm6[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE2-NEXT: pmuludq %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
; SSE2-NEXT: pmuludq %xmm1, %xmm3
+; SSE2-NEXT: pmuludq %xmm7, %xmm5
+; SSE2-NEXT: pmuludq %xmm0, %xmm2
+; SSE2-NEXT: pmuludq %xmm8, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3]
; SSE2-NEXT: movaps %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm5, %xmm1
@@ -1331,22 +1331,22 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; SSE41-LABEL: mul_v8i64_zero_upper:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm8 = xmm4[0],zero,xmm4[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pmuludq %xmm4, %xmm1
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: pmuludq %xmm5, %xmm0
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pmuludq %xmm6, %xmm2
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
; SSE41-NEXT: pmuludq %xmm7, %xmm1
+; SSE41-NEXT: pmuludq %xmm6, %xmm2
+; SSE41-NEXT: pmuludq %xmm5, %xmm0
+; SSE41-NEXT: pmuludq %xmm8, %xmm4
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
; SSE41-NEXT: retq
;
@@ -1356,11 +1356,11 @@ define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm2[1,3],ymm0[1,3],ymm2[5,7],ymm0[5,7]
+; AVX2-NEXT: vpmuludq %ymm3, %ymm2, %ymm1
+; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm1[1,3],ymm0[1,3],ymm1[5,7],ymm0[5,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: retq
;
@@ -1467,22 +1467,22 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE41-LABEL: mul_v8i64_sext:
; SSE41: # BB#0:
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; SSE41-NEXT: pmovsxwq %xmm3, %xmm4
+; SSE41-NEXT: pmovsxwq %xmm3, %xmm8
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovsxwq %xmm3, %xmm5
-; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
; SSE41-NEXT: pmovsxwq %xmm3, %xmm6
-; SSE41-NEXT: pmovsxwq %xmm0, %xmm7
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
+; SSE41-NEXT: pmovsxwq %xmm3, %xmm7
+; SSE41-NEXT: pmovsxwq %xmm0, %xmm5
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm3
-; SSE41-NEXT: pmuldq %xmm4, %xmm3
; SSE41-NEXT: pmovsxdq %xmm2, %xmm2
-; SSE41-NEXT: pmuldq %xmm5, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm4
-; SSE41-NEXT: pmuldq %xmm6, %xmm4
; SSE41-NEXT: pmovsxdq %xmm1, %xmm0
-; SSE41-NEXT: pmuldq %xmm7, %xmm0
+; SSE41-NEXT: pmuldq %xmm5, %xmm0
+; SSE41-NEXT: pmuldq %xmm7, %xmm4
+; SSE41-NEXT: pmuldq %xmm6, %xmm2
+; SSE41-NEXT: pmuldq %xmm8, %xmm3
; SSE41-NEXT: movdqa %xmm4, %xmm1
; SSE41-NEXT: retq
;
@@ -1493,10 +1493,9 @@ define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; AVX2-NEXT: vpmovsxwq %xmm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
-; AVX2-NEXT: vpmuldq %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
; AVX2-NEXT: vpmuldq %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa %ymm2, %ymm1
+; AVX2-NEXT: vpmuldq %ymm3, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: mul_v8i64_sext:
diff --git a/test/CodeGen/X86/pr27591.ll b/test/CodeGen/X86/pr27591.ll
index 3ff6c096d0976..b71cb8c4b3a25 100644
--- a/test/CodeGen/X86/pr27591.ll
+++ b/test/CodeGen/X86/pr27591.ll
@@ -9,12 +9,6 @@ define void @test1(i32 %x) #0 {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: # implicit-def: %EDI
-; CHECK-NEXT: movb %al, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: kmovd %k0, %edi
-; CHECK-NEXT: movb %dil, %al
; CHECK-NEXT: andb $1, %al
; CHECK-NEXT: movzbl %al, %edi
; CHECK-NEXT: callq callee1
@@ -32,17 +26,9 @@ define void @test2(i32 %x) #0 {
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: # implicit-def: %EDI
-; CHECK-NEXT: movb %al, %dil
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovd %edi, %k0
-; CHECK-NEXT: kmovd %k0, %edi
+; CHECK-NEXT: movzbl %al, %edi
; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: movb %dil, %al
-; CHECK-NEXT: xorl %edi, %edi
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: movl $-1, %ecx
-; CHECK-NEXT: cmovnel %ecx, %edi
+; CHECK-NEXT: negl %edi
; CHECK-NEXT: callq callee2
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
diff --git a/test/CodeGen/X86/pr28173.ll b/test/CodeGen/X86/pr28173.ll
index d9622b99bd98e..3279982e46417 100644
--- a/test/CodeGen/X86/pr28173.ll
+++ b/test/CodeGen/X86/pr28173.ll
@@ -8,9 +8,8 @@ target triple = "x86_64-unknown-linux-gnu"
define i64 @foo64(i1 zeroext %i) #0 {
; CHECK-LABEL: foo64:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; CHECK-NEXT: orq $-2, %rdi
-; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orq $-2, %rax
; CHECK-NEXT: retq
br label %bb
@@ -26,8 +25,9 @@ end:
define i16 @foo16(i1 zeroext %i) #0 {
; CHECK-LABEL: foo16:
; CHECK: # BB#0:
-; CHECK-NEXT: orl $65534, %edi # imm = 0xFFFE
-; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
br label %bb
@@ -43,9 +43,9 @@ end:
define i16 @foo16_1(i1 zeroext %i, i32 %j) #0 {
; CHECK-LABEL: foo16_1:
; CHECK: # BB#0:
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: orl $2, %edi
-; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orl $2, %eax
+; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; CHECK-NEXT: retq
br label %bb
@@ -61,8 +61,8 @@ end:
define i32 @foo32(i1 zeroext %i) #0 {
; CHECK-LABEL: foo32:
; CHECK: # BB#0:
-; CHECK-NEXT: orl $-2, %edi
-; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: movzbl %dil, %eax
+; CHECK-NEXT: orl $-2, %eax
; CHECK-NEXT: retq
br label %bb
diff --git a/test/CodeGen/X86/pr29112.ll b/test/CodeGen/X86/pr29112.ll
index 94904018872ba..8c970b3d47718 100644
--- a/test/CodeGen/X86/pr29112.ll
+++ b/test/CodeGen/X86/pr29112.ll
@@ -38,8 +38,7 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm8[0],xmm0[0],xmm8[2,3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[1],xmm1[3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm14 = xmm1[0,1,2],xmm3[1]
-; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm10[0,1,2],xmm3[1]
-; CHECK-NEXT: vaddps %xmm14, %xmm1, %xmm10
+; CHECK-NEXT: vinsertps {{.*#+}} xmm10 = xmm10[0,1,2],xmm3[1]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[2,3]
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[1],xmm0[3]
@@ -53,9 +52,10 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, <
; CHECK-NEXT: vmovaps %xmm15, %xmm1
; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
+; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
-; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm0
-; CHECK-NEXT: vaddps %xmm10, %xmm0, %xmm0
+; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
+; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: vmovaps %xmm9, (%rsp)
diff --git a/test/CodeGen/X86/pr31088.ll b/test/CodeGen/X86/pr31088.ll
index d7a546c7396d5..0dd8eb0ece85a 100644
--- a/test/CodeGen/X86/pr31088.ll
+++ b/test/CodeGen/X86/pr31088.ll
@@ -150,12 +150,12 @@ define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
; F16C-NEXT: vcvtph2ps %xmm3, %xmm3
; F16C-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; F16C-NEXT: vcvtph2ps %xmm1, %xmm1
-; F16C-NEXT: vaddss %xmm3, %xmm1, %xmm1
; F16C-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; F16C-NEXT: vcvtph2ps %xmm2, %xmm2
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; F16C-NEXT: vaddss %xmm3, %xmm1, %xmm1
; F16C-NEXT: retq
%retval = fadd <2 x half> %arg0, %arg1
ret <2 x half> %retval
diff --git a/test/CodeGen/X86/pr32241.ll b/test/CodeGen/X86/pr32241.ll
index d8ce230057ea3..e1f726f0c625d 100644
--- a/test/CodeGen/X86/pr32241.ll
+++ b/test/CodeGen/X86/pr32241.ll
@@ -4,49 +4,57 @@
define i32 @_Z3foov() {
; CHECK-LABEL: _Z3foov:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: subl $20, %esp
+; CHECK-NEXT: pushl %esi
; CHECK-NEXT: .Lcfi0:
-; CHECK-NEXT: .cfi_def_cfa_offset 24
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: subl $24, %esp
+; CHECK-NEXT: .Lcfi1:
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .Lcfi2:
+; CHECK-NEXT: .cfi_offset %esi, -8
+; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: movw $10959, {{[0-9]+}}(%esp) # imm = 0x2ACF
; CHECK-NEXT: movw $-15498, {{[0-9]+}}(%esp) # imm = 0xC376
; CHECK-NEXT: movw $19417, {{[0-9]+}}(%esp) # imm = 0x4BD9
-; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: movw {{[0-9]+}}(%esp), %cx
-; CHECK-NEXT: kxnorw %k0, %k0, %k0
-; CHECK-NEXT: kshiftrw $15, %k0, %k0
-; CHECK-NEXT: testw %cx, %cx
-; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpw $0, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_2
-; CHECK-NEXT: jmp .LBB0_1
-; CHECK-NEXT: .LBB0_1: # %lor.rhs
+; CHECK-NEXT: # BB#1: # %lor.rhs
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jmp .LBB0_2
; CHECK-NEXT: .LBB0_2: # %lor.end
-; CHECK-NEXT: kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload
-; CHECK-NEXT: kxnorw %k0, %k0, %k1
-; CHECK-NEXT: kshiftrw $15, %k1, %k1
-; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
-; CHECK-NEXT: kmovw %k1, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al # 1-byte Reload
+; CHECK-NEXT: movb $1, %cl
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movzbl %al, %edx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
+; CHECK-NEXT: subl %edx, %esi
+; CHECK-NEXT: setl %al
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movzbl %al, %edx
+; CHECK-NEXT: xorl $-1, %edx
+; CHECK-NEXT: cmpl $0, %edx
+; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
+; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_4
-; CHECK-NEXT: jmp .LBB0_3
-; CHECK-NEXT: .LBB0_3: # %lor.rhs4
+; CHECK-NEXT: # BB#3: # %lor.rhs4
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%esp) # 2-byte Spill
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp) # 1-byte Spill
; CHECK-NEXT: jmp .LBB0_4
; CHECK-NEXT: .LBB0_4: # %lor.end5
-; CHECK-NEXT: kmovw {{[0-9]+}}(%esp), %k0 # 2-byte Reload
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: movw %ax, %cx
-; CHECK-NEXT: movw %cx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movb {{[0-9]+}}(%esp), %al # 1-byte Reload
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movzbl %al, %ecx
+; CHECK-NEXT: movw %cx, %dx
+; CHECK-NEXT: movw %dx, {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: addl $20, %esp
+; CHECK-NEXT: addl $24, %esp
+; CHECK-NEXT: popl %esi
; CHECK-NEXT: retl
entry:
%aa = alloca i16, align 2
diff --git a/test/CodeGen/X86/pr32256.ll b/test/CodeGen/X86/pr32256.ll
index cb26c13e53eb2..e29b56236e262 100644
--- a/test/CodeGen/X86/pr32256.ll
+++ b/test/CodeGen/X86/pr32256.ll
@@ -7,39 +7,27 @@
define void @_Z1av() {
; CHECK-LABEL: _Z1av:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: subl $6, %esp
+; CHECK-NEXT: subl $2, %esp
; CHECK-NEXT: .Lcfi0:
-; CHECK-NEXT: .cfi_def_cfa_offset 10
+; CHECK-NEXT: .cfi_def_cfa_offset 6
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: movb c, %cl
-; CHECK-NEXT: # implicit-def: %EAX
-; CHECK-NEXT: movb %cl, %al
-; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: kmovd %eax, %k1
-; CHECK-NEXT: kmovq %k1, %k2
-; CHECK-NEXT: kxnorw %k0, %k0, %k3
-; CHECK-NEXT: kshiftrw $15, %k3, %k3
-; CHECK-NEXT: kxorw %k3, %k1, %k1
-; CHECK-NEXT: kmovd %k1, %eax
; CHECK-NEXT: movb %al, %cl
-; CHECK-NEXT: testb $1, %cl
-; CHECK-NEXT: kmovw %k2, {{[0-9]+}}(%esp) # 2-byte Spill
-; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill
+; CHECK-NEXT: movb c, %dl
+; CHECK-NEXT: xorb $-1, %dl
+; CHECK-NEXT: testb $1, %dl
+; CHECK-NEXT: movb %cl, (%esp) # 1-byte Spill
; CHECK-NEXT: jne .LBB0_1
; CHECK-NEXT: jmp .LBB0_2
; CHECK-NEXT: .LBB0_1: # %land.rhs
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: kmovd %eax, %k0
-; CHECK-NEXT: kmovw %k0, (%esp) # 2-byte Spill
+; CHECK-NEXT: movb %al, %cl
+; CHECK-NEXT: movb %cl, (%esp) # 1-byte Spill
; CHECK-NEXT: jmp .LBB0_2
; CHECK-NEXT: .LBB0_2: # %land.end
-; CHECK-NEXT: kmovw (%esp), %k0 # 2-byte Reload
-; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: movb %al, %cl
-; CHECK-NEXT: andb $1, %cl
-; CHECK-NEXT: movb %cl, {{[0-9]+}}(%esp)
-; CHECK-NEXT: addl $6, %esp
+; CHECK-NEXT: movb (%esp), %al # 1-byte Reload
+; CHECK-NEXT: andb $1, %al
+; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp)
+; CHECK-NEXT: addl $2, %esp
; CHECK-NEXT: retl
entry:
%b = alloca i8, align 1
diff --git a/test/CodeGen/X86/pr32284.ll b/test/CodeGen/X86/pr32284.ll
index 143e3af82eb71..571dd6774906a 100644
--- a/test/CodeGen/X86/pr32284.ll
+++ b/test/CodeGen/X86/pr32284.ll
@@ -39,12 +39,6 @@ define void @foo() {
; X86-O0-NEXT: movzbl %al, %edx
; X86-O0-NEXT: subl %ecx, %edx
; X86-O0-NEXT: setle %al
-; X86-O0-NEXT: # implicit-def: %ECX
-; X86-O0-NEXT: movb %al, %cl
-; X86-O0-NEXT: andl $1, %ecx
-; X86-O0-NEXT: kmovd %ecx, %k0
-; X86-O0-NEXT: kmovd %k0, %ecx
-; X86-O0-NEXT: movb %cl, %al
; X86-O0-NEXT: andb $1, %al
; X86-O0-NEXT: movzbl %al, %ecx
; X86-O0-NEXT: movl %ecx, {{[0-9]+}}(%esp)
@@ -77,12 +71,6 @@ define void @foo() {
; X64-O0-NEXT: movzbl %al, %edx
; X64-O0-NEXT: subl %ecx, %edx
; X64-O0-NEXT: setle %al
-; X64-O0-NEXT: # implicit-def: %ECX
-; X64-O0-NEXT: movb %al, %cl
-; X64-O0-NEXT: andl $1, %ecx
-; X64-O0-NEXT: kmovd %ecx, %k0
-; X64-O0-NEXT: kmovd %k0, %ecx
-; X64-O0-NEXT: movb %cl, %al
; X64-O0-NEXT: andb $1, %al
; X64-O0-NEXT: movzbl %al, %ecx
; X64-O0-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
diff --git a/test/CodeGen/X86/pr32451.ll b/test/CodeGen/X86/pr32451.ll
index d980b7ff284cf..e4643a863f941 100644
--- a/test/CodeGen/X86/pr32451.ll
+++ b/test/CodeGen/X86/pr32451.ll
@@ -25,12 +25,6 @@ define i8** @japi1_convert_690(i8**, i8***, i32) {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload
; CHECK-NEXT: movl 4(%ecx), %edx
; CHECK-NEXT: movb (%edx), %bl
-; CHECK-NEXT: # implicit-def: %EDX
-; CHECK-NEXT: movb %bl, %dl
-; CHECK-NEXT: andl $1, %edx
-; CHECK-NEXT: kmovw %edx, %k0
-; CHECK-NEXT: kmovw %k0, %edx
-; CHECK-NEXT: movb %dl, %bl
; CHECK-NEXT: andb $1, %bl
; CHECK-NEXT: movzbl %bl, %edx
; CHECK-NEXT: movl %edx, (%esp)
diff --git a/test/CodeGen/X86/rotate.ll b/test/CodeGen/X86/rotate.ll
index 4be3a4c2391b4..5d5150ad62d60 100644
--- a/test/CodeGen/X86/rotate.ll
+++ b/test/CodeGen/X86/rotate.ll
@@ -33,8 +33,8 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB0_4:
-; 32-NEXT: orl %ebx, %edx
; 32-NEXT: orl %esi, %eax
+; 32-NEXT: orl %ebx, %edx
; 32-NEXT: popl %esi
; 32-NEXT: popl %edi
; 32-NEXT: popl %ebx
@@ -86,8 +86,8 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB1_4:
-; 32-NEXT: orl %esi, %edx
; 32-NEXT: orl %ebx, %eax
+; 32-NEXT: orl %esi, %edx
; 32-NEXT: popl %esi
; 32-NEXT: popl %edi
; 32-NEXT: popl %ebx
@@ -546,7 +546,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-LABEL: rotr1_64_mem:
; 32: # BB#0:
; 32-NEXT: pushl %esi
-; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT: movl 8(%esp), %eax
; 32-NEXT: movl (%eax), %ecx
; 32-NEXT: movl 4(%eax), %edx
; 32-NEXT: movl %edx, %esi
@@ -555,13 +555,11 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-NEXT: movl %ecx, 4(%eax)
; 32-NEXT: movl %esi, (%eax)
; 32-NEXT: popl %esi
-; 32-NEXT: retl
-;
+
; 64-LABEL: rotr1_64_mem:
; 64: # BB#0:
; 64-NEXT: rorq (%rdi)
; 64-NEXT: retq
-
%A = load i64, i64 *%Aptr
%B = shl i64 %A, 63
%C = lshr i64 %A, 1
@@ -573,7 +571,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
define void @rotr1_32_mem(i32* %Aptr) nounwind {
; 32-LABEL: rotr1_32_mem:
; 32: # BB#0:
-; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT: movl 4(%esp), %eax
; 32-NEXT: rorl (%eax)
; 32-NEXT: retl
;
@@ -592,7 +590,7 @@ define void @rotr1_32_mem(i32* %Aptr) nounwind {
define void @rotr1_16_mem(i16* %Aptr) nounwind {
; 32-LABEL: rotr1_16_mem:
; 32: # BB#0:
-; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT: movl 4(%esp), %eax
; 32-NEXT: rorw (%eax)
; 32-NEXT: retl
;
@@ -611,7 +609,7 @@ define void @rotr1_16_mem(i16* %Aptr) nounwind {
define void @rotr1_8_mem(i8* %Aptr) nounwind {
; 32-LABEL: rotr1_8_mem:
; 32: # BB#0:
-; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32-NEXT: movl 4(%esp), %eax
; 32-NEXT: rorb (%eax)
; 32-NEXT: retl
;
diff --git a/test/CodeGen/X86/rtm.ll b/test/CodeGen/X86/rtm.ll
index 7215c482ffa28..a8562677c7bfe 100644
--- a/test/CodeGen/X86/rtm.ll
+++ b/test/CodeGen/X86/rtm.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X64
+; RUN: llc -verify-machineinstrs < %s -mtriple=i686-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X86
+; RUN: llc -verify-machineinstrs < %s -mtriple=x86_64-unknown-unknown -mattr=+rtm | FileCheck %s --check-prefix=X64
declare i32 @llvm.x86.xbegin() nounwind
declare void @llvm.x86.xend() nounwind
@@ -13,7 +13,8 @@ define i32 @test_xbegin() nounwind uwtable {
; X86-NEXT: xbegin .LBB0_2
; X86-NEXT: # BB#1: # %entry
; X86-NEXT: movl $-1, %eax
-; X86-NEXT: .LBB0_2: # %entry
+; X86: .LBB0_2: # %entry
+; X86-NEXT: # XABORT DEF
; X86-NEXT: retl
;
; X64-LABEL: test_xbegin:
@@ -21,7 +22,8 @@ define i32 @test_xbegin() nounwind uwtable {
; X64-NEXT: xbegin .LBB0_2
; X64-NEXT: # BB#1: # %entry
; X64-NEXT: movl $-1, %eax
-; X64-NEXT: .LBB0_2: # %entry
+; X64: .LBB0_2: # %entry
+; X64-NEXT: # XABORT DEF
; X64-NEXT: retq
entry:
%0 = tail call i32 @llvm.x86.xbegin() nounwind
diff --git a/test/CodeGen/X86/sad.ll b/test/CodeGen/X86/sad.ll
index 6a565a5c76f0b..b8a8b8afd14fd 100644
--- a/test/CodeGen/X86/sad.ll
+++ b/test/CodeGen/X86/sad.ll
@@ -149,131 +149,127 @@ middle.block:
define i32 @sad_32i8() nounwind {
; SSE2-LABEL: sad_32i8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pxor %xmm11, %xmm11
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
-; SSE2-NEXT: pxor %xmm13, %xmm13
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: pxor %xmm15, %xmm15
+; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm12, %xmm12
+; SSE2-NEXT: pxor %xmm15, %xmm15
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pxor %xmm14, %xmm14
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: .LBB1_1: # %vector.body
; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa a+1040(%rax), %xmm8
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa a+1040(%rax), %xmm6
; SSE2-NEXT: movdqa a+1024(%rax), %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
-; SSE2-NEXT: movdqa %xmm8, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm12[8],xmm8[9],xmm12[9],xmm8[10],xmm12[10],xmm8[11],xmm12[11],xmm8[12],xmm12[12],xmm8[13],xmm12[13],xmm8[14],xmm12[14],xmm8[15],xmm12[15]
-; SSE2-NEXT: movdqa b+1024(%rax), %xmm11
-; SSE2-NEXT: movdqa %xmm11, %xmm10
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
-; SSE2-NEXT: movdqa %xmm10, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
-; SSE2-NEXT: psubd %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm3, %xmm8
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1],xmm8[2],xmm11[2],xmm8[3],xmm11[3],xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm11[8],xmm3[9],xmm11[9],xmm3[10],xmm11[10],xmm3[11],xmm11[11],xmm3[12],xmm11[12],xmm3[13],xmm11[13],xmm3[14],xmm11[14],xmm3[15],xmm11[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm11[4],xmm3[5],xmm11[5],xmm3[6],xmm11[6],xmm3[7],xmm11[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3],xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm11[8],xmm6[9],xmm11[9],xmm6[10],xmm11[10],xmm6[11],xmm11[11],xmm6[12],xmm11[12],xmm6[13],xmm11[13],xmm6[14],xmm11[14],xmm6[15],xmm11[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSE2-NEXT: movdqa b+1040(%rax), %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
-; SSE2-NEXT: psubd %xmm10, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
-; SSE2-NEXT: movdqa %xmm11, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
+; SSE2-NEXT: movdqa %xmm9, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm11[8],xmm9[9],xmm11[9],xmm9[10],xmm11[10],xmm9[11],xmm11[11],xmm9[12],xmm11[12],xmm9[13],xmm11[13],xmm9[14],xmm11[14],xmm9[15],xmm11[15]
+; SSE2-NEXT: movdqa %xmm9, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7]
+; SSE2-NEXT: psubd %xmm9, %xmm6
+; SSE2-NEXT: movdqa b+1024(%rax), %xmm4
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm10, %xmm7
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
; SSE2-NEXT: psubd %xmm2, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
-; SSE2-NEXT: psubd %xmm11, %xmm3
-; SSE2-NEXT: movdqa %xmm6, %xmm10
-; SSE2-NEXT: movdqa %xmm9, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
-; SSE2-NEXT: psubd %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm4, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm11[8],xmm4[9],xmm11[9],xmm4[10],xmm11[10],xmm4[11],xmm11[11],xmm4[12],xmm11[12],xmm4[13],xmm11[13],xmm4[14],xmm11[14],xmm4[15],xmm11[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: movdqa %xmm8, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm11[4],xmm8[5],xmm11[5],xmm8[6],xmm11[6],xmm8[7],xmm11[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm11[0],xmm5[1],xmm11[1],xmm5[2],xmm11[2],xmm5[3],xmm11[3]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1],xmm2[2],xmm11[2],xmm2[3],xmm11[3],xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm9, %xmm5
+; SSE2-NEXT: movdqa %xmm2, %xmm4
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm11[4],xmm2[5],xmm11[5],xmm2[6],xmm11[6],xmm2[7],xmm11[7]
+; SSE2-NEXT: psubd %xmm2, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3]
+; SSE2-NEXT: psubd %xmm4, %xmm10
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm10
+; SSE2-NEXT: pxor %xmm2, %xmm10
; SSE2-NEXT: movdqa %xmm8, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
-; SSE2-NEXT: psubd %xmm6, %xmm0
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
-; SSE2-NEXT: movdqa %xmm9, %xmm6
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
-; SSE2-NEXT: psubd %xmm6, %xmm2
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
-; SSE2-NEXT: psubd %xmm9, %xmm8
-; SSE2-NEXT: movdqa %xmm7, %xmm6
-; SSE2-NEXT: psrad $31, %xmm6
-; SSE2-NEXT: paddd %xmm6, %xmm7
-; SSE2-NEXT: pxor %xmm6, %xmm7
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm8
+; SSE2-NEXT: pxor %xmm2, %xmm8
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm5
+; SSE2-NEXT: pxor %xmm2, %xmm5
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm3
+; SSE2-NEXT: pxor %xmm2, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: pxor %xmm2, %xmm1
+; SSE2-NEXT: movdqa %xmm7, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm7
+; SSE2-NEXT: pxor %xmm2, %xmm7
+; SSE2-NEXT: movdqa %xmm6, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: paddd %xmm2, %xmm6
+; SSE2-NEXT: pxor %xmm2, %xmm6
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm6, %xmm14
; SSE2-NEXT: paddd %xmm7, %xmm13
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: psrad $31, %xmm6
-; SSE2-NEXT: paddd %xmm6, %xmm4
-; SSE2-NEXT: pxor %xmm6, %xmm4
-; SSE2-NEXT: movdqa %xmm10, %xmm6
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm1, %xmm4
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm3
-; SSE2-NEXT: pxor %xmm1, %xmm3
+; SSE2-NEXT: paddd %xmm1, %xmm15
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm12
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm5
-; SSE2-NEXT: pxor %xmm1, %xmm5
-; SSE2-NEXT: paddd %xmm5, %xmm14
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm15
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm2
-; SSE2-NEXT: pxor %xmm0, %xmm2
-; SSE2-NEXT: paddd %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm8, %xmm0
-; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm8
-; SSE2-NEXT: pxor %xmm0, %xmm8
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm8, %xmm0
+; SSE2-NEXT: paddd %xmm5, %xmm2
+; SSE2-NEXT: paddd %xmm8, %xmm3
+; SSE2-NEXT: paddd %xmm10, %xmm0
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB1_1
; SSE2-NEXT: # BB#2: # %middle.block
-; SSE2-NEXT: paddd %xmm15, %xmm6
-; SSE2-NEXT: paddd %xmm0, %xmm3
-; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: paddd %xmm14, %xmm13
-; SSE2-NEXT: paddd %xmm1, %xmm4
-; SSE2-NEXT: paddd %xmm3, %xmm4
-; SSE2-NEXT: paddd %xmm13, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,3,0,1]
-; SSE2-NEXT: paddd %xmm4, %xmm0
+; SSE2-NEXT: paddd %xmm15, %xmm3
+; SSE2-NEXT: paddd %xmm14, %xmm1
+; SSE2-NEXT: paddd %xmm12, %xmm0
+; SSE2-NEXT: paddd %xmm13, %xmm2
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: paddd %xmm2, %xmm1
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
@@ -402,284 +398,288 @@ middle.block:
define i32 @sad_avx64i8() nounwind {
; SSE2-LABEL: sad_avx64i8:
; SSE2: # BB#0: # %entry
-; SSE2-NEXT: subq $200, %rsp
-; SSE2-NEXT: pxor %xmm14, %xmm14
-; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
+; SSE2-NEXT: subq $184, %rsp
; SSE2-NEXT: pxor %xmm15, %xmm15
-; SSE2-NEXT: pxor %xmm10, %xmm10
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pxor %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm13, %xmm13
-; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
+; SSE2-NEXT: pxor %xmm12, %xmm12
; SSE2-NEXT: pxor %xmm8, %xmm8
+; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm0, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm14, %xmm14
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm6, %xmm6
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: pxor %xmm11, %xmm11
-; SSE2-NEXT: pxor %xmm4, %xmm4
-; SSE2-NEXT: movdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: pxor %xmm13, %xmm13
+; SSE2-NEXT: pxor %xmm1, %xmm1
+; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: .p2align 4, 0x90
; SSE2-NEXT: .LBB2_1: # %vector.body
; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm3, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm11, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm5, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm13, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm10, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm13, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm7, {{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movaps a+1040(%rax), %xmm0
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa a+1024(%rax), %xmm12
-; SSE2-NEXT: movdqa a+1056(%rax), %xmm15
-; SSE2-NEXT: movdqa a+1072(%rax), %xmm4
-; SSE2-NEXT: movdqa %xmm4, %xmm6
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
-; SSE2-NEXT: movdqa %xmm6, %xmm1
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm4, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3]
-; SSE2-NEXT: movdqa %xmm15, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm14[8],xmm11[9],xmm14[9],xmm11[10],xmm14[10],xmm11[11],xmm14[11],xmm11[12],xmm14[12],xmm11[13],xmm14[13],xmm11[14],xmm14[14],xmm11[15],xmm14[15]
-; SSE2-NEXT: movdqa %xmm11, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1],xmm11[2],xmm14[2],xmm11[3],xmm14[3]
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm15, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
-; SSE2-NEXT: movdqa %xmm12, %xmm10
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm10, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm9
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
+; SSE2-NEXT: movdqa %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm14, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm4, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm6, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm8, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm12, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa a+1040(%rax), %xmm6
+; SSE2-NEXT: movdqa a+1024(%rax), %xmm4
+; SSE2-NEXT: movdqa a+1056(%rax), %xmm11
+; SSE2-NEXT: movdqa a+1072(%rax), %xmm3
+; SSE2-NEXT: movdqa %xmm3, %xmm5
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm11, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3],xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm15[0],xmm11[1],xmm15[1],xmm11[2],xmm15[2],xmm11[3],xmm15[3]
+; SSE2-NEXT: movdqa %xmm4, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm15[0],xmm12[1],xmm15[1],xmm12[2],xmm15[2],xmm12[3],xmm15[3],xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
; SSE2-NEXT: movdqa %xmm12, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; SSE2-NEXT: movdqa %xmm0, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
-; SSE2-NEXT: movdqa b+1072(%rax), %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm0, %xmm1
-; SSE2-NEXT: movdqa b+1056(%rax), %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm7, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm7, %xmm5
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm3, %xmm4
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm7, %xmm8
-; SSE2-NEXT: movdqa b+1024(%rax), %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm3, %xmm11
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm3, %xmm2
-; SSE2-NEXT: movdqa %xmm2, (%rsp) # 16-byte Spill
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm0, %xmm15
-; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm3, %xmm9
-; SSE2-NEXT: movdqa %xmm9, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm15[8],xmm4[9],xmm15[9],xmm4[10],xmm15[10],xmm4[11],xmm15[11],xmm4[12],xmm15[12],xmm4[13],xmm15[13],xmm4[14],xmm15[14],xmm4[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm15[4],xmm4[5],xmm15[5],xmm4[6],xmm15[6],xmm4[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm6, %xmm14
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm15[0],xmm14[1],xmm15[1],xmm14[2],xmm15[2],xmm14[3],xmm15[3],xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm14, %xmm7
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm15[0],xmm7[1],xmm15[1],xmm7[2],xmm15[2],xmm7[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm15[8],xmm6[9],xmm15[9],xmm6[10],xmm15[10],xmm6[11],xmm15[11],xmm6[12],xmm15[12],xmm6[13],xmm15[13],xmm6[14],xmm15[14],xmm6[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm6, %xmm8
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm15[0],xmm8[1],xmm15[1],xmm8[2],xmm15[2],xmm8[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
+; SSE2-NEXT: movdqa b+1040(%rax), %xmm9
+; SSE2-NEXT: movdqa %xmm9, %xmm13
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm15[8],xmm9[9],xmm15[9],xmm9[10],xmm15[10],xmm9[11],xmm15[11],xmm9[12],xmm15[12],xmm9[13],xmm15[13],xmm9[14],xmm15[14],xmm9[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm9, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm9, %xmm6
+; SSE2-NEXT: movdqa b+1024(%rax), %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm15[0],xmm13[1],xmm15[1],xmm13[2],xmm15[2],xmm13[3],xmm15[3],xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm10, %xmm8
+; SSE2-NEXT: movdqa %xmm13, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm13, %xmm14
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm15[8],xmm2[9],xmm15[9],xmm2[10],xmm15[10],xmm2[11],xmm15[11],xmm2[12],xmm15[12],xmm2[13],xmm15[13],xmm2[14],xmm15[14],xmm2[15],xmm15[15]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm9, %xmm7
; SSE2-NEXT: movdqa %xmm2, %xmm9
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm0, %xmm10
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm0, %xmm13
-; SSE2-NEXT: movdqa %xmm13, {{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm9, %xmm0
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm7, %xmm12
-; SSE2-NEXT: movdqa b+1040(%rax), %xmm13
-; SSE2-NEXT: movdqa %xmm13, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm7, %xmm0
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm3, %xmm9
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm14[8],xmm2[9],xmm14[9],xmm2[10],xmm14[10],xmm2[11],xmm14[11],xmm2[12],xmm14[12],xmm2[13],xmm14[13],xmm2[14],xmm14[14],xmm2[15],xmm14[15]
-; SSE2-NEXT: movdqa %xmm2, %xmm7
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm14[8],xmm13[9],xmm14[9],xmm13[10],xmm14[10],xmm13[11],xmm14[11],xmm13[12],xmm14[12],xmm13[13],xmm14[13],xmm13[14],xmm14[14],xmm13[15],xmm14[15]
-; SSE2-NEXT: movdqa %xmm13, %xmm3
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
-; SSE2-NEXT: psubd %xmm3, %xmm7
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
-; SSE2-NEXT: psubd %xmm13, %xmm2
-; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: paddd %xmm3, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm1
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm1, %xmm3
-; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm6, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm6
-; SSE2-NEXT: pxor %xmm1, %xmm6
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm6, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm5, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm5
-; SSE2-NEXT: pxor %xmm1, %xmm5
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm5, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm1, %xmm4
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm8, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm8
-; SSE2-NEXT: pxor %xmm1, %xmm8
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm8, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm11, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm11
-; SSE2-NEXT: pxor %xmm1, %xmm11
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm11, %xmm1
-; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
-; SSE2-NEXT: movdqa (%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm1, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm11
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm15, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm15
-; SSE2-NEXT: pxor %xmm1, %xmm15
-; SSE2-NEXT: paddd %xmm15, %xmm2
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm1, %xmm4
-; SSE2-NEXT: paddd %xmm4, %xmm6
-; SSE2-NEXT: movdqa %xmm6, %xmm15
-; SSE2-NEXT: movdqa %xmm10, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm10
-; SSE2-NEXT: pxor %xmm1, %xmm10
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm10, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm10
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: movdqa %xmm6, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm6
-; SSE2-NEXT: pxor %xmm1, %xmm6
-; SSE2-NEXT: paddd %xmm6, %xmm3
-; SSE2-NEXT: movdqa %xmm12, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm12
-; SSE2-NEXT: pxor %xmm1, %xmm12
-; SSE2-NEXT: paddd %xmm12, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm0
-; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm2, %xmm4
+; SSE2-NEXT: movdqa b+1056(%rax), %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3],xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm10, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm10, %xmm12
+; SSE2-NEXT: movdqa %xmm2, %xmm10
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm15[0],xmm9[1],xmm15[1],xmm9[2],xmm15[2],xmm9[3],xmm15[3]
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm2, %xmm11
+; SSE2-NEXT: movdqa %xmm1, %xmm13
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm15[4],xmm0[5],xmm15[5],xmm0[6],xmm15[6],xmm0[7],xmm15[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8],xmm15[8],xmm10[9],xmm15[9],xmm10[10],xmm15[10],xmm10[11],xmm15[11],xmm10[12],xmm15[12],xmm10[13],xmm15[13],xmm10[14],xmm15[14],xmm10[15],xmm15[15]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm9, %xmm0
+; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm10, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1],xmm10[2],xmm15[2],xmm10[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm10, %xmm1
+; SSE2-NEXT: movdqa %xmm3, %xmm10
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm2, %xmm13
+; SSE2-NEXT: movdqa b+1072(%rax), %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm2, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm9, %xmm10
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm15[8],xmm5[9],xmm15[9],xmm5[10],xmm15[10],xmm5[11],xmm15[11],xmm5[12],xmm15[12],xmm5[13],xmm15[13],xmm5[14],xmm15[14],xmm5[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm9
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm15[8],xmm0[9],xmm15[9],xmm0[10],xmm15[10],xmm0[11],xmm15[11],xmm0[12],xmm15[12],xmm0[13],xmm15[13],xmm0[14],xmm15[14],xmm0[15],xmm15[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
+; SSE2-NEXT: psubd %xmm0, %xmm5
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
+; SSE2-NEXT: psubd %xmm2, %xmm9
; SSE2-NEXT: movdqa %xmm9, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: paddd %xmm0, %xmm9
; SSE2-NEXT: pxor %xmm0, %xmm9
-; SSE2-NEXT: paddd %xmm9, %xmm1
-; SSE2-NEXT: movdqa %xmm7, %xmm0
+; SSE2-NEXT: movdqa %xmm5, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
-; SSE2-NEXT: paddd %xmm0, %xmm7
-; SSE2-NEXT: pxor %xmm0, %xmm7
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm7, %xmm0
-; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
-; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: pxor %xmm0, %xmm5
+; SSE2-NEXT: movdqa %xmm10, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm10
+; SSE2-NEXT: pxor %xmm0, %xmm10
+; SSE2-NEXT: movdqa %xmm3, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm3
+; SSE2-NEXT: pxor %xmm0, %xmm3
+; SSE2-NEXT: movdqa %xmm13, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm13
+; SSE2-NEXT: pxor %xmm0, %xmm13
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: pxor %xmm0, %xmm1
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm11, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm11
+; SSE2-NEXT: pxor %xmm0, %xmm11
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm12, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm12
+; SSE2-NEXT: pxor %xmm0, %xmm12
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm2
+; SSE2-NEXT: pxor %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm4
+; SSE2-NEXT: pxor %xmm0, %xmm4
; SSE2-NEXT: movdqa %xmm7, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: paddd %xmm0, %xmm7
; SSE2-NEXT: pxor %xmm0, %xmm7
+; SSE2-NEXT: movdqa %xmm14, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm14
+; SSE2-NEXT: pxor %xmm0, %xmm14
+; SSE2-NEXT: movdqa %xmm8, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm8
+; SSE2-NEXT: pxor %xmm0, %xmm8
+; SSE2-NEXT: movdqa %xmm6, %xmm0
+; SSE2-NEXT: psrad $31, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm6
+; SSE2-NEXT: pxor %xmm0, %xmm6
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; SSE2-NEXT: paddd %xmm7, %xmm0
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm6, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm8, %xmm6
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8 # 16-byte Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm14, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm7, %xmm2
+; SSE2-NEXT: movdqa %xmm2, {{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm4, %xmm2
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm12, %xmm8
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm12
+; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm11, %xmm0
+; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsp), %xmm11 # 16-byte Reload
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
+; SSE2-NEXT: paddd %xmm1, %xmm2
+; SSE2-NEXT: paddd %xmm13, %xmm7
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm3, %xmm1
+; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm13 # 16-byte Reload
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm10, %xmm1
+; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm5, %xmm3
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm9, %xmm5
+; SSE2-NEXT: movdqa %xmm5, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Reload
; SSE2-NEXT: addq $4, %rax
; SSE2-NEXT: jne .LBB2_1
; SSE2-NEXT: # BB#2: # %middle.block
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Folded Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm8 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm3, %xmm8
-; SSE2-NEXT: paddd %xmm2, %xmm15
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm13 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm8, %xmm13
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm5, %xmm0
-; SSE2-NEXT: paddd %xmm11, %xmm10
-; SSE2-NEXT: paddd -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Folded Reload
-; SSE2-NEXT: paddd %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm10, %xmm1
-; SSE2-NEXT: paddd %xmm13, %xmm1
-; SSE2-NEXT: paddd %xmm15, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT: paddd %xmm2, %xmm4
+; SSE2-NEXT: paddd %xmm3, %xmm6
+; SSE2-NEXT: movdqa %xmm12, %xmm2
+; SSE2-NEXT: paddd %xmm11, %xmm2
+; SSE2-NEXT: paddd %xmm13, %xmm14
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm7, %xmm3
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
+; SSE2-NEXT: paddd %xmm5, %xmm7
+; SSE2-NEXT: paddd %xmm0, %xmm8
+; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: paddd %xmm1, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm7
+; SSE2-NEXT: paddd %xmm4, %xmm6
+; SSE2-NEXT: paddd %xmm14, %xmm6
+; SSE2-NEXT: paddd %xmm0, %xmm7
+; SSE2-NEXT: paddd %xmm8, %xmm7
+; SSE2-NEXT: paddd %xmm6, %xmm7
+; SSE2-NEXT: paddd %xmm2, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm7[2,3,0,1]
+; SSE2-NEXT: paddd %xmm7, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: addq $200, %rsp
+; SSE2-NEXT: addq $184, %rsp
; SSE2-NEXT: retq
;
; AVX2-LABEL: sad_avx64i8:
@@ -688,8 +688,8 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX2-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX2-NEXT: vpxor %ymm6, %ymm6, %ymm6
; AVX2-NEXT: vpxor %ymm5, %ymm5, %ymm5
; AVX2-NEXT: vpxor %ymm7, %ymm7, %ymm7
@@ -697,6 +697,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: .LBB2_1: # %vector.body
; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vmovdqu %ymm8, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
@@ -704,49 +705,48 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vmovdqu %ymm15, -{{[0-9]+}}(%rsp) # 32-byte Spill
+; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; AVX2-NEXT: vpsubd %ymm8, %ymm15, %ymm8
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm8
-; AVX2-NEXT: vmovdqu %ymm8, -{{[0-9]+}}(%rsp) # 32-byte Spill
+; AVX2-NEXT: vpsubd %ymm15, %ymm14, %ymm14
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm9
+; AVX2-NEXT: vpsubd %ymm15, %ymm13, %ymm13
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm10, %ymm10
+; AVX2-NEXT: vpsubd %ymm15, %ymm12, %ymm12
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX2-NEXT: vpsubd %ymm15, %ymm11, %ymm11
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm12, %ymm12
-; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm13, %ymm13
+; AVX2-NEXT: vpsubd %ymm15, %ymm10, %ymm10
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vpsubd %ymm15, %ymm14, %ymm14
+; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm9
+; AVX2-NEXT: vmovdqu %ymm9, -{{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; AVX2-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Reload
-; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm15
-; AVX2-NEXT: vpabsd -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Folded Reload
-; AVX2-NEXT: vpaddd %ymm7, %ymm8, %ymm7
-; AVX2-NEXT: vpabsd %ymm9, %ymm8
-; AVX2-NEXT: vpaddd %ymm5, %ymm8, %ymm5
-; AVX2-NEXT: vpabsd %ymm10, %ymm8
-; AVX2-NEXT: vpaddd %ymm6, %ymm8, %ymm6
-; AVX2-NEXT: vpabsd %ymm11, %ymm8
+; AVX2-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm9 # 32-byte Reload
+; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm15
+; AVX2-NEXT: vpabsd %ymm8, %ymm8
; AVX2-NEXT: vpaddd %ymm3, %ymm8, %ymm3
-; AVX2-NEXT: vpabsd %ymm12, %ymm8
-; AVX2-NEXT: vpaddd %ymm0, %ymm8, %ymm0
-; AVX2-NEXT: vpabsd %ymm13, %ymm8
-; AVX2-NEXT: vpaddd %ymm2, %ymm8, %ymm2
; AVX2-NEXT: vpabsd %ymm14, %ymm8
; AVX2-NEXT: vpaddd %ymm1, %ymm8, %ymm1
-; AVX2-NEXT: vpabsd %ymm15, %ymm8
+; AVX2-NEXT: vpabsd %ymm13, %ymm8
+; AVX2-NEXT: vpaddd %ymm2, %ymm8, %ymm2
+; AVX2-NEXT: vpabsd %ymm12, %ymm8
+; AVX2-NEXT: vpaddd %ymm0, %ymm8, %ymm0
+; AVX2-NEXT: vpabsd %ymm11, %ymm8
; AVX2-NEXT: vpaddd %ymm4, %ymm8, %ymm4
+; AVX2-NEXT: vpabsd %ymm10, %ymm8
+; AVX2-NEXT: vpaddd %ymm6, %ymm8, %ymm6
+; AVX2-NEXT: vpabsd -{{[0-9]+}}(%rsp), %ymm8 # 32-byte Folded Reload
+; AVX2-NEXT: vpaddd %ymm5, %ymm8, %ymm5
+; AVX2-NEXT: vpabsd %ymm15, %ymm8
+; AVX2-NEXT: vpaddd %ymm7, %ymm8, %ymm7
; AVX2-NEXT: addq $4, %rax
; AVX2-NEXT: jne .LBB2_1
; AVX2-NEXT: # BB#2: # %middle.block
; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm7, %ymm4, %ymm4
-; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
-; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpaddd %ymm7, %ymm3, %ymm3
+; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpaddd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
@@ -773,21 +773,21 @@ define i32 @sad_avx64i8() nounwind {
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
+; AVX512F-NEXT: vpsubd %zmm11, %zmm7, %zmm7
+; AVX512F-NEXT: vpsubd %zmm10, %zmm6, %zmm6
+; AVX512F-NEXT: vpsubd %zmm9, %zmm5, %zmm5
; AVX512F-NEXT: vpsubd %zmm8, %zmm4, %zmm4
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpsubd %zmm8, %zmm5, %zmm5
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpsubd %zmm8, %zmm6, %zmm6
-; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
-; AVX512F-NEXT: vpsubd %zmm8, %zmm7, %zmm7
; AVX512F-NEXT: vpabsd %zmm4, %zmm4
+; AVX512F-NEXT: vpabsd %zmm5, %zmm5
+; AVX512F-NEXT: vpabsd %zmm6, %zmm6
+; AVX512F-NEXT: vpabsd %zmm7, %zmm7
+; AVX512F-NEXT: vpaddd %zmm3, %zmm7, %zmm3
+; AVX512F-NEXT: vpaddd %zmm2, %zmm6, %zmm2
+; AVX512F-NEXT: vpaddd %zmm1, %zmm5, %zmm1
; AVX512F-NEXT: vpaddd %zmm0, %zmm4, %zmm0
-; AVX512F-NEXT: vpabsd %zmm5, %zmm4
-; AVX512F-NEXT: vpaddd %zmm1, %zmm4, %zmm1
-; AVX512F-NEXT: vpabsd %zmm6, %zmm4
-; AVX512F-NEXT: vpaddd %zmm2, %zmm4, %zmm2
-; AVX512F-NEXT: vpabsd %zmm7, %zmm4
-; AVX512F-NEXT: vpaddd %zmm3, %zmm4, %zmm3
; AVX512F-NEXT: addq $4, %rax
; AVX512F-NEXT: jne .LBB2_1
; AVX512F-NEXT: # BB#2: # %middle.block
@@ -1154,54 +1154,59 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; SSE2-LABEL: sad_nonloop_32i8:
; SSE2: # BB#0:
; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: movdqu 16(%rdi), %xmm12
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm12, %xmm8
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm8, %xmm10
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm0, %xmm9
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3],xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm9, %xmm11
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
-; SSE2-NEXT: movdqa %xmm12, %xmm13
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1],xmm12[2],xmm1[2],xmm12[3],xmm1[3]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: movdqu (%rdx), %xmm7
-; SSE2-NEXT: movdqu 16(%rdx), %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm6
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm6, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-NEXT: psubd %xmm5, %xmm10
-; SSE2-NEXT: movdqa %xmm7, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
-; SSE2-NEXT: movdqa %xmm2, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-NEXT: psubd %xmm5, %xmm11
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-NEXT: psubd %xmm5, %xmm13
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
-; SSE2-NEXT: movdqa %xmm7, %xmm5
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
-; SSE2-NEXT: psubd %xmm5, %xmm4
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; SSE2-NEXT: psubd %xmm6, %xmm8
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE2-NEXT: psubd %xmm2, %xmm9
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; SSE2-NEXT: psubd %xmm3, %xmm12
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
-; SSE2-NEXT: psubd %xmm7, %xmm0
+; SSE2-NEXT: movdqu 16(%rdi), %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
+; SSE2-NEXT: movdqa %xmm3, %xmm12
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3],xmm12[4],xmm4[4],xmm12[5],xmm4[5],xmm12[6],xmm4[6],xmm12[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm12, %xmm9
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm4[4],xmm9[5],xmm4[5],xmm9[6],xmm4[6],xmm9[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm0, %xmm13
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3],xmm13[4],xmm4[4],xmm13[5],xmm4[5],xmm13[6],xmm4[6],xmm13[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm13, %xmm10
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm3, %xmm11
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm4[4],xmm11[5],xmm4[5],xmm11[6],xmm4[6],xmm11[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm4[8],xmm0[9],xmm4[9],xmm0[10],xmm4[10],xmm0[11],xmm4[11],xmm0[12],xmm4[12],xmm0[13],xmm4[13],xmm0[14],xmm4[14],xmm0[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm6
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1],xmm12[2],xmm4[2],xmm12[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1],xmm13[2],xmm4[2],xmm13[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE2-NEXT: movdqu (%rdx), %xmm5
+; SSE2-NEXT: movdqu 16(%rdx), %xmm7
+; SSE2-NEXT: movdqa %xmm7, %xmm1
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm2, -{{[0-9]+}}(%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm5, %xmm2
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
+; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm4[4],xmm14[5],xmm4[5],xmm14[6],xmm4[6],xmm14[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm7, %xmm15
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm4[4],xmm15[5],xmm4[5],xmm15[6],xmm4[6],xmm15[7],xmm4[7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm4[8],xmm5[9],xmm4[9],xmm5[10],xmm4[10],xmm5[11],xmm4[11],xmm5[12],xmm4[12],xmm5[13],xmm4[13],xmm5[14],xmm4[14],xmm5[15],xmm4[15]
+; SSE2-NEXT: movdqa %xmm5, %xmm8
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
+; SSE2-NEXT: psubd %xmm5, %xmm0
+; SSE2-NEXT: psubd %xmm7, %xmm3
+; SSE2-NEXT: psubd %xmm2, %xmm13
+; SSE2-NEXT: psubd %xmm1, %xmm12
+; SSE2-NEXT: psubd %xmm8, %xmm6
+; SSE2-NEXT: psubd %xmm15, %xmm11
+; SSE2-NEXT: psubd %xmm14, %xmm10
+; SSE2-NEXT: psubd -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Folded Reload
+; SSE2-NEXT: movdqa %xmm9, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm9
+; SSE2-NEXT: pxor %xmm1, %xmm9
; SSE2-NEXT: movdqa %xmm10, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm10
@@ -1210,37 +1215,33 @@ define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* n
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm11
; SSE2-NEXT: pxor %xmm1, %xmm11
-; SSE2-NEXT: movdqa %xmm13, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm13
-; SSE2-NEXT: pxor %xmm1, %xmm13
-; SSE2-NEXT: movdqa %xmm4, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm4
-; SSE2-NEXT: pxor %xmm1, %xmm4
-; SSE2-NEXT: paddd %xmm13, %xmm4
-; SSE2-NEXT: paddd %xmm10, %xmm4
-; SSE2-NEXT: paddd %xmm11, %xmm4
-; SSE2-NEXT: movdqa %xmm8, %xmm1
-; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm8
-; SSE2-NEXT: pxor %xmm1, %xmm8
-; SSE2-NEXT: movdqa %xmm9, %xmm1
+; SSE2-NEXT: movdqa %xmm6, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
-; SSE2-NEXT: paddd %xmm1, %xmm9
-; SSE2-NEXT: pxor %xmm1, %xmm9
+; SSE2-NEXT: paddd %xmm1, %xmm6
+; SSE2-NEXT: pxor %xmm1, %xmm6
; SSE2-NEXT: movdqa %xmm12, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm12
; SSE2-NEXT: pxor %xmm1, %xmm12
+; SSE2-NEXT: movdqa %xmm13, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm13
+; SSE2-NEXT: pxor %xmm1, %xmm13
+; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: psrad $31, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm3
+; SSE2-NEXT: pxor %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: paddd %xmm1, %xmm0
; SSE2-NEXT: pxor %xmm1, %xmm0
+; SSE2-NEXT: paddd %xmm3, %xmm0
+; SSE2-NEXT: paddd %xmm11, %xmm6
+; SSE2-NEXT: paddd %xmm9, %xmm6
+; SSE2-NEXT: paddd %xmm10, %xmm6
; SSE2-NEXT: paddd %xmm12, %xmm0
-; SSE2-NEXT: paddd %xmm8, %xmm0
-; SSE2-NEXT: paddd %xmm4, %xmm0
-; SSE2-NEXT: paddd %xmm9, %xmm0
+; SSE2-NEXT: paddd %xmm6, %xmm0
+; SSE2-NEXT: paddd %xmm13, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: paddd %xmm0, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll
index 1afef86a5f11d..ce42d0d643e8b 100644
--- a/test/CodeGen/X86/select.ll
+++ b/test/CodeGen/X86/select.ll
@@ -299,21 +299,20 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
; GENERIC-NEXT: testb %dil, %dil
; GENERIC-NEXT: jne LBB7_4
; GENERIC-NEXT: ## BB#5:
-; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; GENERIC-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; GENERIC-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; GENERIC-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: jmp LBB7_6
; GENERIC-NEXT: LBB7_4:
-; GENERIC-NEXT: movd %r9d, %xmm1
-; GENERIC-NEXT: movd %ecx, %xmm2
-; GENERIC-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; GENERIC-NEXT: movd %r8d, %xmm3
+; GENERIC-NEXT: movd %r9d, %xmm2
+; GENERIC-NEXT: movd %ecx, %xmm3
+; GENERIC-NEXT: movd %r8d, %xmm4
; GENERIC-NEXT: movd %edx, %xmm1
; GENERIC-NEXT: LBB7_6:
+; GENERIC-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm1
; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm0
; GENERIC-NEXT: movq %xmm0, 16(%rsi)
@@ -340,19 +339,16 @@ define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2)
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; ATOM-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; ATOM-NEXT: jmp LBB7_6
; ATOM-NEXT: LBB7_4:
-; ATOM-NEXT: movd %r9d, %xmm1
-; ATOM-NEXT: movd %ecx, %xmm2
-; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; ATOM-NEXT: movd %r8d, %xmm3
+; ATOM-NEXT: movd %r9d, %xmm2
+; ATOM-NEXT: movd %ecx, %xmm3
+; ATOM-NEXT: movd %r8d, %xmm4
; ATOM-NEXT: movd %edx, %xmm1
-; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; ATOM-NEXT: LBB7_6:
+; ATOM-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
+; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; ATOM-NEXT: psubd {{.*}}(%rip), %xmm0
; ATOM-NEXT: psubd {{.*}}(%rip), %xmm1
; ATOM-NEXT: movq %xmm0, 16(%rsi)
diff --git a/test/CodeGen/X86/setcc-wide-types.ll b/test/CodeGen/X86/setcc-wide-types.ll
index 332bf2887fb05..2996edaec3e0e 100644
--- a/test/CodeGen/X86/setcc-wide-types.ll
+++ b/test/CodeGen/X86/setcc-wide-types.ll
@@ -58,25 +58,25 @@ define i32 @ne_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-LABEL: ne_i256:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %rax
+; SSE2-NEXT: movq %xmm4, %r8
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %rcx
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %xmm1, %r8
+; SSE2-NEXT: movq %xmm4, %r9
+; SSE2-NEXT: movq %xmm0, %r10
+; SSE2-NEXT: movq %xmm1, %rsi
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: xorq %rax, %rdi
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rsi
-; SSE2-NEXT: xorq %rcx, %rsi
-; SSE2-NEXT: orq %rdi, %rsi
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorq %rdx, %rax
-; SSE2-NEXT: movq %xmm3, %rcx
-; SSE2-NEXT: xorq %r8, %rcx
-; SSE2-NEXT: orq %rax, %rcx
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movq %xmm3, %rdx
+; SSE2-NEXT: xorq %rsi, %rdx
+; SSE2-NEXT: xorq %r10, %rcx
+; SSE2-NEXT: orq %rdx, %rcx
+; SSE2-NEXT: xorq %r9, %rax
+; SSE2-NEXT: xorq %r8, %rdi
+; SSE2-NEXT: orq %rax, %rdi
; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: orq %rsi, %rcx
+; SSE2-NEXT: orq %rcx, %rdi
; SSE2-NEXT: setne %al
; SSE2-NEXT: retq
;
@@ -100,25 +100,25 @@ define i32 @eq_i256(<4 x i64> %x, <4 x i64> %y) {
; SSE2-LABEL: eq_i256:
; SSE2: # BB#0:
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %rax
+; SSE2-NEXT: movq %xmm4, %r8
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE2-NEXT: movq %xmm4, %rcx
-; SSE2-NEXT: movq %xmm0, %rdx
-; SSE2-NEXT: movq %xmm1, %r8
+; SSE2-NEXT: movq %xmm4, %r9
+; SSE2-NEXT: movq %xmm0, %r10
+; SSE2-NEXT: movq %xmm1, %rsi
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; SSE2-NEXT: movq %xmm0, %rdi
-; SSE2-NEXT: xorq %rax, %rdi
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
-; SSE2-NEXT: movq %xmm0, %rsi
-; SSE2-NEXT: xorq %rcx, %rsi
-; SSE2-NEXT: orq %rdi, %rsi
-; SSE2-NEXT: movq %xmm2, %rax
-; SSE2-NEXT: xorq %rdx, %rax
-; SSE2-NEXT: movq %xmm3, %rcx
-; SSE2-NEXT: xorq %r8, %rcx
-; SSE2-NEXT: orq %rax, %rcx
+; SSE2-NEXT: movq %xmm0, %rax
+; SSE2-NEXT: movq %xmm2, %rcx
+; SSE2-NEXT: movq %xmm3, %rdx
+; SSE2-NEXT: xorq %rsi, %rdx
+; SSE2-NEXT: xorq %r10, %rcx
+; SSE2-NEXT: orq %rdx, %rcx
+; SSE2-NEXT: xorq %r9, %rax
+; SSE2-NEXT: xorq %r8, %rdi
+; SSE2-NEXT: orq %rax, %rdi
; SSE2-NEXT: xorl %eax, %eax
-; SSE2-NEXT: orq %rsi, %rcx
+; SSE2-NEXT: orq %rcx, %rdi
; SSE2-NEXT: sete %al
; SSE2-NEXT: retq
;
diff --git a/test/CodeGen/X86/shrink_vmul_sse.ll b/test/CodeGen/X86/shrink_vmul_sse.ll
index 6701c247e6fc5..c869dff9e6423 100644
--- a/test/CodeGen/X86/shrink_vmul_sse.ll
+++ b/test/CodeGen/X86/shrink_vmul_sse.ll
@@ -20,9 +20,9 @@ define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64
; CHECK-NEXT: movzbl 1(%edx,%ecx), %edi
; CHECK-NEXT: movzbl (%edx,%ecx), %edx
; CHECK-NEXT: movzbl 1(%eax,%ecx), %ebx
-; CHECK-NEXT: imull %edi, %ebx
; CHECK-NEXT: movzbl (%eax,%ecx), %eax
; CHECK-NEXT: imull %edx, %eax
+; CHECK-NEXT: imull %edi, %ebx
; CHECK-NEXT: movl %ebx, 4(%esi,%ecx,4)
; CHECK-NEXT: movl %eax, (%esi,%ecx,4)
; CHECK-NEXT: popl %esi
diff --git a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
index d99cfaf535de9..0b03dffe99b55 100644
--- a/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/sse-intrinsics-fast-isel.ll
@@ -1537,9 +1537,9 @@ define <4 x float> @test_mm_set_ps(float %a0, float %a1, float %a2, float %a3) n
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X32-NEXT: retl
;
@@ -1673,13 +1673,13 @@ define void @test_mm_setcsr(i32 %a0) nounwind {
define <4 x float> @test_mm_setr_ps(float %a0, float %a1, float %a2, float %a3) nounwind {
; X32-LABEL: test_mm_setr_ps:
; X32: # BB#0:
-; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X32-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; X32-NEXT: retl
;
; X64-LABEL: test_mm_setr_ps:
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll
index f711dc615742c..4b2af6fce8de7 100644
--- a/test/CodeGen/X86/sse-scalar-fp-arith.ll
+++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll
@@ -1119,9 +1119,9 @@ define <4 x float> @add_ss_mask(<4 x float> %a, <4 x float> %b, <4 x float> %c,
;
; AVX512-LABEL: add_ss_mask:
; AVX512: # BB#0:
-; AVX512-NEXT: andl $1, %edi
+; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: kmovw %edi, %k1
-; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
; AVX512-NEXT: vmovaps %xmm2, %xmm0
; AVX512-NEXT: retq
%1 = extractelement <4 x float> %a, i64 0
@@ -1174,9 +1174,9 @@ define <2 x double> @add_sd_mask(<2 x double> %a, <2 x double> %b, <2 x double>
;
; AVX512-LABEL: add_sd_mask:
; AVX512: # BB#0:
-; AVX512-NEXT: andl $1, %edi
+; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: kmovw %edi, %k1
-; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
; AVX512-NEXT: vmovapd %xmm2, %xmm0
; AVX512-NEXT: retq
%1 = extractelement <2 x double> %a, i64 0
diff --git a/test/CodeGen/X86/sse1.ll b/test/CodeGen/X86/sse1.ll
index 68ab3f9f32056..f4964b5a6f661 100644
--- a/test/CodeGen/X86/sse1.ll
+++ b/test/CodeGen/X86/sse1.ll
@@ -66,8 +66,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-NEXT: jne .LBB1_8
; X32-NEXT: .LBB1_7:
; X32-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X32-NEXT: je .LBB1_10
; X32-NEXT: jmp .LBB1_11
; X32-NEXT: .LBB1_1:
@@ -80,8 +80,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X32-NEXT: je .LBB1_7
; X32-NEXT: .LBB1_8: # %entry
; X32-NEXT: xorps %xmm3, %xmm3
-; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X32-NEXT: cmpl $0, {{[0-9]+}}(%esp)
+; X32-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X32-NEXT: jne .LBB1_11
; X32-NEXT: .LBB1_10:
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -105,8 +105,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X64-NEXT: jne .LBB1_8
; X64-NEXT: .LBB1_7:
; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: testl %esi, %esi
+; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: je .LBB1_10
; X64-NEXT: jmp .LBB1_11
; X64-NEXT: .LBB1_1:
@@ -119,8 +119,8 @@ define <4 x float> @vselect(<4 x float>*%p, <4 x i32> %q) {
; X64-NEXT: je .LBB1_7
; X64-NEXT: .LBB1_8: # %entry
; X64-NEXT: xorps %xmm3, %xmm3
-; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: testl %esi, %esi
+; X64-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; X64-NEXT: jne .LBB1_11
; X64-NEXT: .LBB1_10:
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
diff --git a/test/CodeGen/X86/sse3-avx-addsub-2.ll b/test/CodeGen/X86/sse3-avx-addsub-2.ll
index aed5e0d1c32e0..4d895ea264c5d 100644
--- a/test/CodeGen/X86/sse3-avx-addsub-2.ll
+++ b/test/CodeGen/X86/sse3-avx-addsub-2.ll
@@ -412,14 +412,14 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: subss %xmm4, %xmm3
-; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm0, %xmm3
+; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE-NEXT: addss %xmm0, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT: addss %xmm0, %xmm1
-; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
@@ -431,12 +431,12 @@ define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; AVX-NEXT: vsubss %xmm4, %xmm3, %xmm3
; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm4, %xmm4
-; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
-; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm4[0],xmm2[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: retq
%1 = extractelement <4 x float> %A, i32 0
%2 = extractelement <4 x float> %B, i32 0
diff --git a/test/CodeGen/X86/sse41.ll b/test/CodeGen/X86/sse41.ll
index 4a0dc9c1eb171..503b9416c8d38 100644
--- a/test/CodeGen/X86/sse41.ll
+++ b/test/CodeGen/X86/sse41.ll
@@ -273,8 +273,8 @@ define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; X32: ## BB#0: ## %entry
; X32-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; X32-NEXT: addss %xmm2, %xmm3
; X32-NEXT: addss %xmm1, %xmm0
+; X32-NEXT: addss %xmm2, %xmm3
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
; X32-NEXT: retl
;
@@ -282,8 +282,8 @@ define <2 x float> @buildvector(<2 x float> %A, <2 x float> %B) nounwind {
; X64: ## BB#0: ## %entry
; X64-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X64-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; X64-NEXT: addss %xmm2, %xmm3
; X64-NEXT: addss %xmm1, %xmm0
+; X64-NEXT: addss %xmm2, %xmm3
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[2,3]
; X64-NEXT: retq
entry:
@@ -896,9 +896,9 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X32-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; X32-NEXT: addps %xmm1, %xmm0
; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
; X32-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; X32-NEXT: addps %xmm1, %xmm0
; X32-NEXT: addps %xmm2, %xmm3
; X32-NEXT: addps %xmm3, %xmm0
; X32-NEXT: retl
@@ -908,9 +908,9 @@ define <4 x float> @insertps_from_broadcast_multiple_use(<4 x float> %a, <4 x fl
; X64-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0]
; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
-; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
; X64-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; X64-NEXT: addps %xmm1, %xmm0
; X64-NEXT: addps %xmm2, %xmm3
; X64-NEXT: addps %xmm3, %xmm0
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/subcarry.ll b/test/CodeGen/X86/subcarry.ll
new file mode 100644
index 0000000000000..df676328f6824
--- /dev/null
+++ b/test/CodeGen/X86/subcarry.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
+
+%S = type { [4 x i64] }
+
+define %S @negate(%S* nocapture readonly %this) {
+; CHECK-LABEL: negate:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movq (%rsi), %rax
+; CHECK-NEXT: movq 8(%rsi), %rcx
+; CHECK-NEXT: notq %rax
+; CHECK-NEXT: addq $1, %rax
+; CHECK-NEXT: notq %rcx
+; CHECK-NEXT: adcq $0, %rcx
+; CHECK-NEXT: movq 16(%rsi), %rdx
+; CHECK-NEXT: notq %rdx
+; CHECK-NEXT: adcq $0, %rdx
+; CHECK-NEXT: movq 24(%rsi), %rsi
+; CHECK-NEXT: notq %rsi
+; CHECK-NEXT: adcq $0, %rsi
+; CHECK-NEXT: movq %rax, (%rdi)
+; CHECK-NEXT: movq %rcx, 8(%rdi)
+; CHECK-NEXT: movq %rdx, 16(%rdi)
+; CHECK-NEXT: movq %rsi, 24(%rdi)
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 0
+ %1 = load i64, i64* %0, align 8
+ %2 = xor i64 %1, -1
+ %3 = zext i64 %2 to i128
+ %4 = add nuw nsw i128 %3, 1
+ %5 = trunc i128 %4 to i64
+ %6 = lshr i128 %4, 64
+ %7 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 1
+ %8 = load i64, i64* %7, align 8
+ %9 = xor i64 %8, -1
+ %10 = zext i64 %9 to i128
+ %11 = add nuw nsw i128 %6, %10
+ %12 = trunc i128 %11 to i64
+ %13 = lshr i128 %11, 64
+ %14 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 2
+ %15 = load i64, i64* %14, align 8
+ %16 = xor i64 %15, -1
+ %17 = zext i64 %16 to i128
+ %18 = add nuw nsw i128 %13, %17
+ %19 = lshr i128 %18, 64
+ %20 = trunc i128 %18 to i64
+ %21 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 3
+ %22 = load i64, i64* %21, align 8
+ %23 = xor i64 %22, -1
+ %24 = zext i64 %23 to i128
+ %25 = add nuw nsw i128 %19, %24
+ %26 = trunc i128 %25 to i64
+ %27 = insertvalue [4 x i64] undef, i64 %5, 0
+ %28 = insertvalue [4 x i64] %27, i64 %12, 1
+ %29 = insertvalue [4 x i64] %28, i64 %20, 2
+ %30 = insertvalue [4 x i64] %29, i64 %26, 3
+ %31 = insertvalue %S undef, [4 x i64] %30, 0
+ ret %S %31
+}
+
+define %S @sub(%S* nocapture readonly %this, %S %arg.b) local_unnamed_addr {
+; CHECK-LABEL: sub:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: notq %rdx
+; CHECK-NEXT: xorl %r10d, %r10d
+; CHECK-NEXT: addq (%rsi), %rdx
+; CHECK-NEXT: setb %r10b
+; CHECK-NEXT: addq $1, %rdx
+; CHECK-NEXT: adcq 8(%rsi), %r10
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: movzbl %al, %r11d
+; CHECK-NEXT: notq %rcx
+; CHECK-NEXT: addq %r10, %rcx
+; CHECK-NEXT: adcq 16(%rsi), %r11
+; CHECK-NEXT: setb %al
+; CHECK-NEXT: movzbl %al, %eax
+; CHECK-NEXT: notq %r8
+; CHECK-NEXT: addq %r11, %r8
+; CHECK-NEXT: adcq 24(%rsi), %rax
+; CHECK-NEXT: notq %r9
+; CHECK-NEXT: addq %rax, %r9
+; CHECK-NEXT: movq %rdx, (%rdi)
+; CHECK-NEXT: movq %rcx, 8(%rdi)
+; CHECK-NEXT: movq %r8, 16(%rdi)
+; CHECK-NEXT: movq %r9, 24(%rdi)
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
+entry:
+ %0 = extractvalue %S %arg.b, 0
+ %.elt6 = extractvalue [4 x i64] %0, 1
+ %.elt8 = extractvalue [4 x i64] %0, 2
+ %.elt10 = extractvalue [4 x i64] %0, 3
+ %.elt = extractvalue [4 x i64] %0, 0
+ %1 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 0
+ %2 = load i64, i64* %1, align 8
+ %3 = zext i64 %2 to i128
+ %4 = add nuw nsw i128 %3, 1
+ %5 = xor i64 %.elt, -1
+ %6 = zext i64 %5 to i128
+ %7 = add nuw nsw i128 %4, %6
+ %8 = trunc i128 %7 to i64
+ %9 = lshr i128 %7, 64
+ %10 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 1
+ %11 = load i64, i64* %10, align 8
+ %12 = zext i64 %11 to i128
+ %13 = add nuw nsw i128 %9, %12
+ %14 = xor i64 %.elt6, -1
+ %15 = zext i64 %14 to i128
+ %16 = add nuw nsw i128 %13, %15
+ %17 = trunc i128 %16 to i64
+ %18 = lshr i128 %16, 64
+ %19 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 2
+ %20 = load i64, i64* %19, align 8
+ %21 = zext i64 %20 to i128
+ %22 = add nuw nsw i128 %18, %21
+ %23 = xor i64 %.elt8, -1
+ %24 = zext i64 %23 to i128
+ %25 = add nuw nsw i128 %22, %24
+ %26 = lshr i128 %25, 64
+ %27 = trunc i128 %25 to i64
+ %28 = getelementptr inbounds %S, %S* %this, i64 0, i32 0, i64 3
+ %29 = load i64, i64* %28, align 8
+ %30 = zext i64 %29 to i128
+ %31 = add nuw nsw i128 %26, %30
+ %32 = xor i64 %.elt10, -1
+ %33 = zext i64 %32 to i128
+ %34 = add nuw nsw i128 %31, %33
+ %35 = trunc i128 %34 to i64
+ %36 = insertvalue [4 x i64] undef, i64 %8, 0
+ %37 = insertvalue [4 x i64] %36, i64 %17, 1
+ %38 = insertvalue [4 x i64] %37, i64 %27, 2
+ %39 = insertvalue [4 x i64] %38, i64 %35, 3
+ %40 = insertvalue %S undef, [4 x i64] %39, 0
+ ret %S %40
+}
diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll
index 1eef67764ab9b..a42b3c96c3ae6 100644
--- a/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/test/CodeGen/X86/vec_int_to_fp.ll
@@ -4344,7 +4344,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_4
; AVX1-NEXT: # BB#5:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
; AVX1-NEXT: jmp .LBB80_6
; AVX1-NEXT: .LBB80_4:
; AVX1-NEXT: movq %rax, %rcx
@@ -4352,22 +4352,22 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
-; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm4
+; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3
; AVX1-NEXT: .LBB80_6:
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_7
; AVX1-NEXT: # BB#8:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
; AVX1-NEXT: jmp .LBB80_9
; AVX1-NEXT: .LBB80_7:
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq %rcx
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
-; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4
; AVX1-NEXT: .LBB80_9:
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: testq %rax, %rax
@@ -4397,29 +4397,29 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX1-NEXT: vaddss %xmm5, %xmm5, %xmm5
; AVX1-NEXT: .LBB80_15:
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_16
; AVX1-NEXT: # BB#17:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
; AVX1-NEXT: jmp .LBB80_18
; AVX1-NEXT: .LBB80_16:
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq %rcx
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
-; AVX1-NEXT: vaddss %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
+; AVX1-NEXT: vaddss %xmm3, %xmm3, %xmm3
; AVX1-NEXT: .LBB80_18:
-; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vmovq %xmm3, %rax
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vmovq %xmm4, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_19
; AVX1-NEXT: # BB#20:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
; AVX1-NEXT: jmp .LBB80_21
; AVX1-NEXT: .LBB80_19:
; AVX1-NEXT: movq %rax, %rcx
@@ -4427,25 +4427,25 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
-; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vaddss %xmm0, %xmm0, %xmm5
; AVX1-NEXT: .LBB80_21:
-; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0]
-; AVX1-NEXT: vpextrq $1, %xmm3, %rax
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3]
+; AVX1-NEXT: vpextrq $1, %xmm4, %rax
; AVX1-NEXT: testq %rax, %rax
; AVX1-NEXT: js .LBB80_22
; AVX1-NEXT: # BB#23:
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
; AVX1-NEXT: jmp .LBB80_24
; AVX1-NEXT: .LBB80_22:
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq %rcx
; AVX1-NEXT: andl $1, %eax
; AVX1-NEXT: orq %rcx, %rax
-; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
-; AVX1-NEXT: vaddss %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
+; AVX1-NEXT: vaddss %xmm2, %xmm2, %xmm2
; AVX1-NEXT: .LBB80_24:
-; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0]
+; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
@@ -4471,7 +4471,7 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_4
; AVX2-NEXT: # BB#5:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm4
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
; AVX2-NEXT: jmp .LBB80_6
; AVX2-NEXT: .LBB80_4:
; AVX2-NEXT: movq %rax, %rcx
@@ -4479,22 +4479,22 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm3, %xmm3
-; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm4
+; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3
; AVX2-NEXT: .LBB80_6:
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX2-NEXT: vmovq %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_7
; AVX2-NEXT: # BB#8:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
; AVX2-NEXT: jmp .LBB80_9
; AVX2-NEXT: .LBB80_7:
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm3
-; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm4, %xmm4
+; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4
; AVX2-NEXT: .LBB80_9:
; AVX2-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-NEXT: testq %rax, %rax
@@ -4524,29 +4524,29 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm5, %xmm5
; AVX2-NEXT: vaddss %xmm5, %xmm5, %xmm5
; AVX2-NEXT: .LBB80_15:
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[2,3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[2,3]
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_16
; AVX2-NEXT: # BB#17:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
; AVX2-NEXT: jmp .LBB80_18
; AVX2-NEXT: .LBB80_16:
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm4
-; AVX2-NEXT: vaddss %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm3
+; AVX2-NEXT: vaddss %xmm3, %xmm3, %xmm3
; AVX2-NEXT: .LBB80_18:
-; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vmovq %xmm3, %rax
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm4[0],xmm1[3]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_19
; AVX2-NEXT: # BB#20:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm5
; AVX2-NEXT: jmp .LBB80_21
; AVX2-NEXT: .LBB80_19:
; AVX2-NEXT: movq %rax, %rcx
@@ -4554,25 +4554,25 @@ define <8 x float> @uitofp_load_8i64_to_8f32(<8 x i64> *%a) {
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm0
-; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: vaddss %xmm0, %xmm0, %xmm5
; AVX2-NEXT: .LBB80_21:
-; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm0[0],xmm4[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm2[0]
-; AVX2-NEXT: vpextrq $1, %xmm3, %rax
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1],xmm5[0],xmm3[3]
+; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: testq %rax, %rax
; AVX2-NEXT: js .LBB80_22
; AVX2-NEXT: # BB#23:
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
; AVX2-NEXT: jmp .LBB80_24
; AVX2-NEXT: .LBB80_22:
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq %rcx
; AVX2-NEXT: andl $1, %eax
; AVX2-NEXT: orq %rcx, %rax
-; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm1
-; AVX2-NEXT: vaddss %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vcvtsi2ssq %rax, %xmm6, %xmm2
+; AVX2-NEXT: vaddss %xmm2, %xmm2, %xmm2
; AVX2-NEXT: .LBB80_24:
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1,2],xmm1[0]
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-bitreverse.ll b/test/CodeGen/X86/vector-bitreverse.ll
index 2fb821555dba5..226c0adbaf3c3 100644
--- a/test/CodeGen/X86/vector-bitreverse.ll
+++ b/test/CodeGen/X86/vector-bitreverse.ll
@@ -2372,10 +2372,10 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
; AVX512F-NEXT: vpsrlq $24, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
+; AVX512F-NEXT: vpsrlq $8, %zmm0, %zmm3
+; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm3, %zmm3
; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
-; AVX512F-NEXT: vpsrlq $8, %zmm0, %zmm2
-; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
-; AVX512F-NEXT: vporq %zmm1, %zmm2, %zmm1
+; AVX512F-NEXT: vporq %zmm1, %zmm3, %zmm1
; AVX512F-NEXT: vpsllq $8, %zmm0, %zmm2
; AVX512F-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm2, %zmm2
; AVX512F-NEXT: vpsllq $24, %zmm0, %zmm3
diff --git a/test/CodeGen/X86/vector-blend.ll b/test/CodeGen/X86/vector-blend.ll
index f0a5fe1dbfffb..a05a981daa1f0 100644
--- a/test/CodeGen/X86/vector-blend.ll
+++ b/test/CodeGen/X86/vector-blend.ll
@@ -848,10 +848,10 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: pand %xmm1, %xmm3
; SSE2-NEXT: pandn %xmm5, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pandn %xmm4, %xmm0
; SSE2-NEXT: por %xmm2, %xmm0
+; SSE2-NEXT: por %xmm3, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: blend_logic_v8i32:
@@ -860,10 +860,10 @@ define <8 x i32> @blend_logic_v8i32(<8 x i32> %b, <8 x i32> %a, <8 x i32> %c) {
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: pand %xmm1, %xmm3
; SSSE3-NEXT: pandn %xmm5, %xmm1
-; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: pand %xmm0, %xmm2
; SSSE3-NEXT: pandn %xmm4, %xmm0
; SSSE3-NEXT: por %xmm2, %xmm0
+; SSSE3-NEXT: por %xmm3, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: blend_logic_v8i32:
diff --git a/test/CodeGen/X86/vector-sqrt.ll b/test/CodeGen/X86/vector-sqrt.ll
index 8081e9482d674..c5ac4466b5faa 100644
--- a/test/CodeGen/X86/vector-sqrt.ll
+++ b/test/CodeGen/X86/vector-sqrt.ll
@@ -29,11 +29,11 @@ define <4 x float> @sqrtf4(float* nocapture readonly %v) local_unnamed_addr #0 {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vsqrtss (%rdi), %xmm0, %xmm0
; CHECK-NEXT: vsqrtss 4(%rdi), %xmm1, %xmm1
+; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm2
+; CHECK-NEXT: vsqrtss 12(%rdi), %xmm3, %xmm3
; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
-; CHECK-NEXT: vsqrtss 8(%rdi), %xmm2, %xmm1
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
-; CHECK-NEXT: vsqrtss 12(%rdi), %xmm2, %xmm1
-; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm2[0],xmm0[3]
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm3[0]
; CHECK-NEXT: retq
entry:
%0 = load float, float* %v, align 4
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 450e255313b32..6fbec91e77a37 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -11,13 +11,13 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
-; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm4
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT: vaddpd %ymm2, %ymm4, %ymm2
; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT: vaddpd %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm1
+; AVX-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vaddpd %ymm0, %ymm1, %ymm0
; AVX-NEXT: retq
%wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
%strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
@@ -39,11 +39,11 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
-; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT: vmulpd %ymm0, %ymm4, %ymm0
+; AVX-NEXT: vmulpd %ymm0, %ymm2, %ymm0
; AVX-NEXT: retq
%wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
%strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
@@ -124,9 +124,9 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
-; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT: vpaddq %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vpaddq %ymm3, %ymm4, %ymm1
+; AVX2-NEXT: vpaddq %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; AVX2-NEXT: retq
%wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16
diff --git a/test/CodeGen/X86/xmulo.ll b/test/CodeGen/X86/xmulo.ll
index aed305058f0b6..03f284d87a666 100644
--- a/test/CodeGen/X86/xmulo.ll
+++ b/test/CodeGen/X86/xmulo.ll
@@ -712,17 +712,11 @@ define i1 @bug27873(i64 %c1, i1 %c2) {
;
; KNL-LABEL: bug27873:
; KNL: ## BB#0:
-; KNL-NEXT: andl $1, %esi
; KNL-NEXT: movl $160, %ecx
; KNL-NEXT: movq %rdi, %rax
; KNL-NEXT: mulq %rcx
-; KNL-NEXT: kmovw %esi, %k0
; KNL-NEXT: seto %al
-; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: korw %k1, %k0, %k0
-; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: orb %sil, %al
; KNL-NEXT: retq
%mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %c1, i64 160)
%mul.overflow = extractvalue { i64, i1 } %mul, 1
diff --git a/test/CodeGen/X86/xor-select-i1-combine.ll b/test/CodeGen/X86/xor-select-i1-combine.ll
index 6507ddcc7697c..c9383282a0cc9 100644
--- a/test/CodeGen/X86/xor-select-i1-combine.ll
+++ b/test/CodeGen/X86/xor-select-i1-combine.ll
@@ -7,10 +7,10 @@
define i32 @main(i8 %small) {
; CHECK-LABEL: main:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: movl $n, %eax
-; CHECK-NEXT: movl $m, %ecx
; CHECK-NEXT: testb $1, %dil
-; CHECK-NEXT: cmovneq %rax, %rcx
+; CHECK-NEXT: movl $m, %eax
+; CHECK-NEXT: movl $n, %ecx
+; CHECK-NEXT: cmoveq %rax, %rcx
; CHECK-NEXT: movl (%rcx), %eax
; CHECK-NEXT: retq
entry: